diff --git a/Makefile b/Makefile
index 302679bf4..d0541f8e1 100644
--- a/Makefile
+++ b/Makefile
@@ -42,7 +42,7 @@ else
endif
endif
-RESTIC_VER := 0.17.3
+RESTIC_VER := 0.18.1
###
### These variables should not need tweaking.
diff --git a/go.mod b/go.mod
index 3b947352c..fef3c7487 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,15 @@
module stash.appscode.dev/vault
-go 1.24.0
+go 1.25
require (
- cloud.google.com/go/kms v1.15.5
- cloud.google.com/go/storage v1.36.0
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
+ cloud.google.com/go/kms v1.21.0
+ cloud.google.com/go/storage v1.51.0
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0
- github.com/aws/aws-sdk-go v1.44.100
+ github.com/aws/aws-sdk-go v1.55.6
github.com/hashicorp/vault/api v1.10.0
- github.com/spf13/cobra v1.8.0
+ github.com/spf13/cobra v1.10.1
go.bytebuilders.dev/license-verifier/kubernetes v0.14.10
gomodules.xyz/flags v0.1.3
gomodules.xyz/go-sh v0.1.0
@@ -17,29 +17,71 @@ require (
gomodules.xyz/password-generator v0.2.9
gomodules.xyz/pointer v0.1.0
gomodules.xyz/x v0.0.17
- google.golang.org/api v0.155.0
- google.golang.org/protobuf v1.34.2
- k8s.io/api v0.30.2
- k8s.io/apimachinery v0.30.2
- k8s.io/client-go v0.30.2
+ google.golang.org/api v0.228.0
+ google.golang.org/protobuf v1.36.10
+ k8s.io/api v0.34.3
+ k8s.io/apimachinery v0.34.3
+ k8s.io/client-go v0.34.3
k8s.io/klog/v2 v2.130.1
- kmodules.xyz/client-go v0.30.48
- kmodules.xyz/custom-resources v0.30.0
- kmodules.xyz/offshoot-api v0.30.1
+ kmodules.xyz/client-go v0.34.2
+ kmodules.xyz/custom-resources v0.34.0
+ kmodules.xyz/offshoot-api v0.34.0
kubevault.dev/apimachinery v0.18.3
- stash.appscode.dev/apimachinery v0.42.1-0.20251212070131-ba26c6b88bc6
+ stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48
)
require (
- cloud.google.com/go v0.112.0 // indirect
- cloud.google.com/go/compute/metadata v0.3.0 // indirect
- cloud.google.com/go/iam v1.1.5 // indirect
+ cel.dev/expr v0.24.0 // indirect
+ cloud.google.com/go/auth v0.15.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+ cloud.google.com/go/longrunning v0.6.5 // indirect
+ cloud.google.com/go/monitoring v1.24.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
+ github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
+ github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.5 // indirect
+ github.com/go-openapi/swag/cmdutils v0.25.1 // indirect
+ github.com/go-openapi/swag/conv v0.25.1 // indirect
+ github.com/go-openapi/swag/fileutils v0.25.1 // indirect
+ github.com/go-openapi/swag/jsonname v0.25.1 // indirect
+ github.com/go-openapi/swag/jsonutils v0.25.1 // indirect
+ github.com/go-openapi/swag/loading v0.25.1 // indirect
+ github.com/go-openapi/swag/mangling v0.25.1 // indirect
+ github.com/go-openapi/swag/netutils v0.25.1 // indirect
+ github.com/go-openapi/swag/stringutils v0.25.1 // indirect
+ github.com/go-openapi/swag/typeutils v0.25.1 // indirect
+ github.com/go-openapi/swag/yamlutils v0.25.1 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ github.com/zeebo/errs v1.4.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.36.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+)
+
+require (
+ cloud.google.com/go v0.120.0 // indirect
+ cloud.google.com/go/compute/metadata v0.7.0 // indirect
+ cloud.google.com/go/iam v1.4.2 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
- github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
+ github.com/Masterminds/semver/v3 v3.3.1 // indirect
github.com/PuerkitoBio/purell v1.2.1 // indirect
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -47,33 +89,31 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch v5.9.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.13.0 // indirect
+ github.com/evanphx/json-patch v5.9.11+incompatible // indirect
+ github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.21.0 // indirect
- github.com/go-openapi/jsonreference v0.21.0 // indirect
- github.com/go-openapi/swag v0.23.0 // indirect
- github.com/go-sql-driver/mysql v1.8.1 // indirect
+ github.com/go-openapi/jsonpointer v0.22.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.2 // indirect
+ github.com/go-openapi/swag v0.25.1 // indirect
+ github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/go-containerregistry v0.19.1 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/go-containerregistry v0.20.7 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect
- github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
+ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -85,84 +125,79 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/jmespath/go-jmespath v0.4.0 // indirect
- github.com/josharian/intern v1.0.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/onsi/gomega v1.33.1 // indirect
+ github.com/onsi/gomega v1.36.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 // indirect
- github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.46.0 // indirect
- github.com/prometheus/procfs v0.15.0 // indirect
- github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 // indirect
- github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe // indirect
- github.com/rancher/wrangler/v3 v3.0.0 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rancher/norman v0.5.2 // indirect
+ github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe // indirect
+ github.com/rancher/wrangler/v3 v3.2.0-rc.3 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
- github.com/sergi/go-diff v1.2.0 // indirect
+ github.com/sergi/go-diff v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/pflag v1.0.9 // indirect
github.com/yudai/gojsondiff v1.0.0 // indirect
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
- go.bytebuilders.dev/license-proxyserver v0.0.20 // indirect
+ go.bytebuilders.dev/license-proxyserver v0.0.24 // indirect
go.bytebuilders.dev/license-verifier v0.14.10 // indirect
- go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+ go.opentelemetry.io/otel v1.36.0 // indirect
+ go.opentelemetry.io/otel/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.36.0 // indirect
golang.org/x/crypto v0.46.0 // indirect
- golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/net v0.47.0 // indirect
- golang.org/x/oauth2 v0.27.0 // indirect
+ golang.org/x/oauth2 v0.33.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
- golang.org/x/time v0.5.0 // indirect
+ golang.org/x/time v0.13.0 // indirect
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
gomodules.xyz/mergo v0.3.13 // indirect
gomodules.xyz/sets v0.2.1 // indirect
gomodules.xyz/wait v0.2.0 // indirect
- google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/grpc v1.62.1 // indirect
+ google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
+ google.golang.org/grpc v1.72.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiextensions-apiserver v0.30.2 // indirect
- k8s.io/apiserver v0.30.2 // indirect
- k8s.io/kube-aggregator v0.30.2 // indirect
- k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 // indirect
- k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
+ k8s.io/apiextensions-apiserver v0.34.3 // indirect
+ k8s.io/apiserver v0.34.3 // indirect
+ k8s.io/kube-aggregator v0.34.3 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
kmodules.xyz/apiversion v0.2.0 // indirect
- kmodules.xyz/monitoring-agent-api v0.29.0 // indirect
- kmodules.xyz/objectstore-api v0.29.1 // indirect
- kmodules.xyz/prober v0.29.0 // indirect
- sigs.k8s.io/controller-runtime v0.18.4 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
+ kmodules.xyz/monitoring-agent-api v0.34.0 // indirect
+ kmodules.xyz/objectstore-api v0.34.0 // indirect
+ kmodules.xyz/prober v0.34.0 // indirect
+ sigs.k8s.io/controller-runtime v0.22.4 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/yaml v1.6.0 // indirect
)
replace github.com/Masterminds/sprig/v3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8
-replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe
+replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.6
-replace k8s.io/apiserver => github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b
+replace k8s.io/apiserver => github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc
diff --git a/go.sum b/go.sum
index 09a88df29..03efe85f9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -5,40 +7,65 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
+cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM=
-cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI=
+cloud.google.com/go/iam v1.4.2 h1:4AckGYAYsowXeHzsn/LCKWIwSWLkdb0eGjH8wWkd27Q=
+cloud.google.com/go/iam v1.4.2/go.mod h1:REGlrt8vSlh4dfCJfSEcNjLGq75wW75c5aU3FLOYq34=
+cloud.google.com/go/kms v1.21.0 h1:x3EeWKuYwdlo2HLse/876ZrKjk2L5r7Uexfm8+p6mSI=
+cloud.google.com/go/kms v1.21.0/go.mod h1:zoFXMhVVK7lQ3JC9xmhHMoQhnjEDZFoLAr5YMwzBLtk=
+cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
+cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
+cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q=
+cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY=
+cloud.google.com/go/monitoring v1.24.1 h1:vKiypZVFD/5a3BbQMvI4gZdl8445ITzXFh257XBgrS0=
+cloud.google.com/go/monitoring v1.24.1/go.mod h1:Z05d1/vn9NaujqY2voG6pVQXoJGbp+r3laV+LySt9K0=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.51.0 h1:ZVZ11zCiD7b3k+cH5lQs/qcNaoSz3U9I0jgwVzqDlCw=
+cloud.google.com/go/storage v1.51.0/go.mod h1:YEJfu/Ki3i5oHC/7jyTgsGZwdQ8P9hqMqvpi5kRKGgc=
+cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE=
+cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0 h1:xnO4sFyG8UH2fElBkcqLTOZsAajvKfnSlgBBW8dXYjw=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.12.0/go.mod h1:XD3DIOOVgBCO03OleB1fHjgktVRFxlT++KwKgIOewdM=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw=
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
+github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
+github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.2.1 h1:QsZ4TjvwiMpat6gBCBxEQI0rcS9ehtkKtSpiUnd9N28=
github.com/PuerkitoBio/purell v1.2.1/go.mod h1:ZwHcC/82TOaovDi//J/804umJFFmbOHPngi8iYYv/Eo=
@@ -49,8 +76,8 @@ github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloD
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go v1.44.100 h1:7I86bWNQB+HGDT5z/dJy61J7qgbgLoZ7O51C9eL6hrA=
-github.com/aws/aws-sdk-go v1.44.100/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -59,14 +86,12 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -76,25 +101,27 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
+github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
+github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
@@ -103,35 +130,62 @@ github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
-github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
+github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
+github.com/go-openapi/swag v0.25.1 h1:6uwVsx+/OuvFVPqfQmOOPsqTcm5/GkBhNwLqIR916n8=
+github.com/go-openapi/swag v0.25.1/go.mod h1:bzONdGlT0fkStgGPd3bhZf1MnuPkf2YAys6h+jZipOo=
+github.com/go-openapi/swag/cmdutils v0.25.1 h1:nDke3nAFDArAa631aitksFGj2omusks88GF1VwdYqPY=
+github.com/go-openapi/swag/cmdutils v0.25.1/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0=
+github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs=
+github.com/go-openapi/swag/fileutils v0.25.1 h1:rSRXapjQequt7kqalKXdcpIegIShhTPXx7yw0kek2uU=
+github.com/go-openapi/swag/fileutils v0.25.1/go.mod h1:+NXtt5xNZZqmpIpjqcujqojGFek9/w55b3ecmOdtg8M=
+github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
+github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
+github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8=
+github.com/go-openapi/swag/jsonutils v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1/go.mod h1:kjmweouyPwRUEYMSrbAidoLMGeJ5p6zdHi9BgZiqmsg=
+github.com/go-openapi/swag/loading v0.25.1 h1:6OruqzjWoJyanZOim58iG2vj934TysYVptyaoXS24kw=
+github.com/go-openapi/swag/loading v0.25.1/go.mod h1:xoIe2EG32NOYYbqxvXgPzne989bWvSNoWoyQVWEZicc=
+github.com/go-openapi/swag/mangling v0.25.1 h1:XzILnLzhZPZNtmxKaz/2xIGPQsBsvmCjrJOWGNz/ync=
+github.com/go-openapi/swag/mangling v0.25.1/go.mod h1:CdiMQ6pnfAgyQGSOIYnZkXvqhnnwOn997uXZMAd/7mQ=
+github.com/go-openapi/swag/netutils v0.25.1 h1:2wFLYahe40tDUHfKT1GRC4rfa5T1B4GWZ+msEFA4Fl4=
+github.com/go-openapi/swag/netutils v0.25.1/go.mod h1:CAkkvqnUJX8NV96tNhEQvKz8SQo2KF0f7LleiJwIeRE=
+github.com/go-openapi/swag/stringutils v0.25.1 h1:Xasqgjvk30eUe8VKdmyzKtjkVjeiXx1Iz0zDfMNpPbw=
+github.com/go-openapi/swag/stringutils v0.25.1/go.mod h1:JLdSAq5169HaiDUbTvArA2yQxmgn4D6h4A+4HqVvAYg=
+github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3I3ysiFZqukA=
+github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8=
+github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk=
+github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
@@ -140,70 +194,57 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
-github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -254,28 +295,30 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b h1:pJA5vV5MDtLX3KP7tGuT05QTquydQyxNAy0R75chrDY=
-github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
-github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe h1:6nl5dIci8FTzM2hxZ89ufxTXUYqLr9kSGEPPwX87ryk=
-github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc h1:R5bKc1c8Qu7z+7+O0xNWxIPjCYuaHUVZ+dSfeCZEd+c=
+github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w=
+github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd h1:cpLV7Pr+pSo3kDYY4HsLZfbdF1WPQuPTP+Jo3hyoWzw=
+github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -288,11 +331,9 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -315,8 +356,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -325,10 +367,10 @@ github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JX
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
-github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
+github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -339,40 +381,41 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2 h1:6UsAv+jAevuGO2yZFU/BukV4o9NKnFMOuoouSA4G0ns=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.2/go.mod h1:XYrdZw5dW12Cjkt4ndbeNZZTBp4UCHtW0ccR9+sTtPU=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1 h1:wyKanf+IFdbIqbDNYGt+f1dabLErLWtBaxd0KaAx4aM=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.87.1/go.mod h1:WHiLZmOWVop/MoYvRD58LfnPeyE+dcITby/jQjg83Hw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
-github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
-github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 h1:AlRMRs5mHJcdiK83KKJyFVeybPMZ7dOUzC0l3k9aUa8=
-github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9/go.mod h1:dyjfXBsNiroPWOdUZe7diUOUSLf6HQ/r2kEpwH/8zas=
-github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe h1:ZD+h5ylTFvzjMDb/DS0R+q3FuoGSB6IFgd8bqRIrnZY=
-github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe/go.mod h1:A+DTKG05BZs1mOoCIB6UpiKo7j0dC6kSz3mgYju9Q20=
-github.com/rancher/wrangler/v3 v3.0.0 h1:IHHCA+vrghJDPxjtLk4fmeSCFhNe9fFzLFj3m2B0YpA=
-github.com/rancher/wrangler/v3 v3.0.0/go.mod h1:Dfckuuq7MJk2JWVBDywRlZXMxEyPxHy4XqGrPEzu5Eg=
+github.com/rancher/norman v0.5.2 h1:rwUKZ0QeVKJEtznhRdNQUMJtKjSoLYbFuPQGXm6xTxw=
+github.com/rancher/norman v0.5.2/go.mod h1:lDO9ylAYBwch9FiYyuuWlYd7+IxgRgh0ioDJBweC7t4=
+github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe h1:ckqJXsEOiyEolfnry5tJ6LDTNkeloYwdizGymrAIjTM=
+github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe/go.mod h1:sA4Fa3EAKYMqxvLWdAVZHkjnahHq5zYFXVFNQZSTyPs=
+github.com/rancher/wrangler/v3 v3.2.0-rc.3 h1:MySHWLxLLrGrM2sq5YYp7Ol1kQqYt9lvIzjGR50UZ+c=
+github.com/rancher/wrangler/v3 v3.2.0-rc.3/go.mod h1:0C5QyvSrQOff8gQQzpB/L/FF03EQycjR3unSJcKCHno=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -380,8 +423,8 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -394,28 +437,30 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
@@ -428,10 +473,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
-go.bytebuilders.dev/license-proxyserver v0.0.20 h1:gzRSwUmX/LSwPVE6T9oy5RLIutU1EeI7hmS+QGsYBY4=
-go.bytebuilders.dev/license-proxyserver v0.0.20/go.mod h1:2PJmjMCXncVyeP3fIVQ+hwZnuhmWSTmbcuEMBrFKIac=
+go.bytebuilders.dev/license-proxyserver v0.0.24 h1:IXFrxXmPM9s/t9UB75caumgR0rl56GuIPJxv/ifFTd0=
+go.bytebuilders.dev/license-proxyserver v0.0.24/go.mod h1:MUwspWlc+ScUdJIjb9BEeS8g7GadJ4hgf+cByNW2o9w=
go.bytebuilders.dev/license-verifier v0.14.10 h1:K4VZjaoDXQde8QtL2kzpgk0jHw3W5CxFK9vh78RbDbQ=
go.bytebuilders.dev/license-verifier v0.14.10/go.mod h1:+cr+kft45r9BbsmZ9D5MGK9CrOf0VL3kBuOd/MiahdA=
go.bytebuilders.dev/license-verifier/kubernetes v0.14.10 h1:5L7ICdR4kkOVy8rb0tMrTR3wC/yGvam9DIQvKHo8yk8=
@@ -439,20 +486,26 @@ go.bytebuilders.dev/license-verifier/kubernetes v0.14.10/go.mod h1:DXxySMXnkwJuG
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
-go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
@@ -462,6 +515,10 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -478,8 +535,6 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -513,9 +568,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
@@ -524,8 +577,8 @@ golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
-golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
+golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -553,7 +606,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -582,8 +634,8 @@ golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -594,7 +646,6 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
@@ -613,8 +664,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f h1:hTyhR4r+tj1Uq7/PpFxLTzbeA0LhMVp7bEYfhkzFjdY=
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f/go.mod h1:K3m7N+nBOlf91/tpv8REUGwsAgaKFwElQCuiLhm12AQ=
gomodules.xyz/flags v0.1.3 h1:jQ06+EfmoMv5NvjXvJon03dOhLU+FF0TQMWN7I6qpzs=
@@ -643,8 +692,8 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
-google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs=
+google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -658,33 +707,19 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 h1:qEFnJI6AnfZk0NNe8YTyXQh5i//Zxi4gBHwRgp76qpw=
+google.golang.org/genproto v0.0.0-20250324211829-b45e905df463/go.mod h1:SqIx1NV9hcvqdLHo7uNZDS5lrUJybQ3evo3+z/WBfA0=
+google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
+google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -692,6 +727,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -712,47 +749,48 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
-k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
+k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
+k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g=
+k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0=
+k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
+k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
+k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-aggregator v0.30.2 h1:0+yk/ED6foCprY8VmkDPUhngjaAPKsNTXB/UrtvbIz0=
-k8s.io/kube-aggregator v0.30.2/go.mod h1:EhqCfDdxysNWXk1wRL9SEHAdo1DKl6EULQagztkBcXE=
-k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 h1:T5TEV4a+pEjc+j9Xui3MGGeoDLIN6uzZrx8NYotFMgQ=
-k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-aggregator v0.34.3 h1:rKsZWTD2As4dKuv+zzdJU0uo5H7bFlAEoSucai4mW6M=
+k8s.io/kube-aggregator v0.34.3/go.mod h1:d4D8PV2FK4Qlq6u442FSum1tHPhK9tKdKBfH/A3R0I0=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kmodules.xyz/apiversion v0.2.0 h1:vAQYqZFm4xu4pbB1cAdHbFEPES6EQkcR4wc06xdTOWk=
kmodules.xyz/apiversion v0.2.0/go.mod h1:oPX8g8LvlPdPX3Yc5YvCzJHQnw3YF/X4/jdW0b1am80=
-kmodules.xyz/client-go v0.30.48 h1:TpDn03xq6xYATx+9kZ4PkiukIO1tgPrrip1UNq5QJlQ=
-kmodules.xyz/client-go v0.30.48/go.mod h1:8rqSBZeyAHatINE5VQNuIzbJJJbGSDZygAwFNyOXzP8=
-kmodules.xyz/custom-resources v0.30.0 h1:vR3CbseHMLwR4GvtcJJuRuwIV8voKqFqNii27rMcm1o=
-kmodules.xyz/custom-resources v0.30.0/go.mod h1:ZsTuI2mLG2s3byre7bHmpxJ9w0HDqAkRTL1+izGFI24=
-kmodules.xyz/monitoring-agent-api v0.29.0 h1:gpFl6OZrlMLb/ySMHdREI9EwGtnJ91oZBn9H1UFRwB4=
-kmodules.xyz/monitoring-agent-api v0.29.0/go.mod h1:iNbvaMTgVFOI5q2LJtGK91j4Dmjv4ZRiRdasGmWLKQI=
-kmodules.xyz/objectstore-api v0.29.1 h1:uUsjf8KU0w4LYowSEOnl0AbHT3hsHIu1wNLHqGe1o6s=
-kmodules.xyz/objectstore-api v0.29.1/go.mod h1:xG+5awH1SXYKxwN/+k1FEQvzixd5tgNqEN/1LEiB2FE=
-kmodules.xyz/offshoot-api v0.30.1 h1:TrulAYO+oBsXe9sZZGTmNWIuI8qD2izMpgcTSPvgAmI=
-kmodules.xyz/offshoot-api v0.30.1/go.mod h1:T3mpjR6fui0QzOcmQvIuANytW48fe9ytmy/1cgx6D4g=
-kmodules.xyz/prober v0.29.0 h1:Ex7m4F9rH7uWNNJlLgP63ROOM+nUATJkC2L5OQ7nwMg=
-kmodules.xyz/prober v0.29.0/go.mod h1:UtK+HKyI1lFLEKX+HFLyOCVju6TO93zv3kwGpzqmKOo=
+kmodules.xyz/client-go v0.34.2 h1:2Cec+nyfj9kfbR+5KPK3AksxN6h4jSjhn/tw+Dhqggo=
+kmodules.xyz/client-go v0.34.2/go.mod h1:kQRuGMxhb+B9rVdcfBzjK+PV7oBDo+SaDiQ66u1QG+4=
+kmodules.xyz/custom-resources v0.34.0 h1:ljkIYzIq0A3Awj87kkpuYqS9aifuyR3Hr9q2OVKoojM=
+kmodules.xyz/custom-resources v0.34.0/go.mod h1:pcA/n/CnrycjKCRNtU9Z+l5svhzFncLY2Kn9pqeXDVs=
+kmodules.xyz/monitoring-agent-api v0.34.0 h1:SNgKvC1j8oYWQcdClyV2T5GsOQoG40c3pK9aYKm0j8A=
+kmodules.xyz/monitoring-agent-api v0.34.0/go.mod h1:XFDfMHDZQeNEPdTDeDr4M0dT4UCWs+4IYzgHw7JDlms=
+kmodules.xyz/objectstore-api v0.34.0 h1:h16QGeKAJB27mj0rhvm3I0q0ulE99TgIulob/UkXzRc=
+kmodules.xyz/objectstore-api v0.34.0/go.mod h1:x6snmU8evi1K0qcWKrTXbkSN18hamcuGraYGCgtG9JY=
+kmodules.xyz/offshoot-api v0.34.0 h1:HnOOp8FrCjTWjtNApRDo6Ahe79tOlLrJmyye4xxO4Kk=
+kmodules.xyz/offshoot-api v0.34.0/go.mod h1:F+B59yYw4CZJ4uD4xu6C+mMLzIXUtuH7E+SbDICl9jE=
+kmodules.xyz/prober v0.34.0 h1:ElZkZYCjLaytAA0M8EH42To7i9gh1IIX+d0qfaIohys=
+kmodules.xyz/prober v0.34.0/go.mod h1:rsu/fxxfNxY70GDbH6Ju8G66459hi7AhWSSBoiIp8ic=
kubevault.dev/apimachinery v0.18.3 h1:Bq180AGBYnRXXNWbJ6Zg82+8/3M1Y8WYPez32uTry8I=
kubevault.dev/apimachinery v0.18.3/go.mod h1:b9uUVFx3a3ThDziL2J2O4xQL+muY1/pGavAhDdJC99E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-stash.appscode.dev/apimachinery v0.42.1-0.20251212070131-ba26c6b88bc6 h1:8c7pW5uSstbnW6eOd/UXeHd5HIHeFzZZ2Ehc4pwxlIM=
-stash.appscode.dev/apimachinery v0.42.1-0.20251212070131-ba26c6b88bc6/go.mod h1:XZiXo8eKlbOJ61CmA/zaW99IHE5YVsEg4QxSziMDCQ0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
+stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48 h1:wBJaQUeY5OAhB6UUg1BTfuFm3E1IWtlNEfgA/ya8Vsc=
+stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48/go.mod h1:TMNL2PGWlxWOQtspAPZZfIWJTHDmJBffqiVJ4dq4mLg=
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..13c50892b
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.3.2
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+ (once every three weeks) and the council decision included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/cel.dev/expr/LICENSE b/vendor/cel.dev/expr/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cel.dev/expr/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthais Blume | Google | cel-spec |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..85ac9ff61
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,74 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.39.1",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20241220-5e258e33.bcr.1",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "googleapis-cc",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-java",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-go",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "27.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.17",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.53.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "7.0.2",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.22.0")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..42d67f87c
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,71 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+ tooling to reason about the system as a whole
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation free, and not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications, and allows for
+ extensibility via its context which allows for functions and data to be
+ provided by the software that embeds it.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+ * The library itself and accompanying toolings should be easy to adopt by
+ teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. It is
+ of similar syntax to expressions in C/C++/Java/JavaScript
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+ Most use-cases will use `attribute_context.proto`
+* An evaluator library that takes the binary format in the context and
+ produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communcation, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team will maintains canonical protocol buffers for ASTs and
+will keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..e3e533a04
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.3.2'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..a7aae0900
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,487 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.3
+// protoc v5.27.1
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExprValue) GetKind() isExprValue_Kind {
+ if x != nil {
+ return x.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Value); ok {
+ return x.Value
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Error); ok {
+ return x.Error
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type Status struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
+ 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
+ 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
+ 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
+ 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
+ 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
+ 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_cel_expr_eval_proto_goTypes = []any{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*Status)(nil), // 3: cel.expr.Status
+ (*UnknownSet)(nil), // 4: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
+ (*Value)(nil), // 6: cel.expr.Value
+ (*anypb.Any)(nil), // 7: google.protobuf.Any
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
+ 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in ${files[@]};
+do
+ dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+ echo "copying $dst"
+ $(cp $src $dst)
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore
new file mode 100644
index 000000000..cc7e53b46
--- /dev/null
+++ b/vendor/cloud.google.com/go/.gitignore
@@ -0,0 +1,12 @@
+# Editors
+.idea
+.vscode
+*.swp
+.history
+
+# Test files
+*.test
+coverage.txt
+
+# Other
+.DS_Store
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
new file mode 100644
index 000000000..512fdb07b
--- /dev/null
+++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
@@ -0,0 +1,16 @@
+{
+ "auth": "0.15.0",
+ "auth/oauth2adapt": "0.2.8",
+ "bigquery": "1.67.0",
+ "bigtable": "1.35.0",
+ "datastore": "1.20.0",
+ "errorreporting": "0.3.2",
+ "firestore": "1.18.0",
+ "logging": "1.13.0",
+ "profiler": "0.4.2",
+ "pubsub": "1.48.0",
+ "pubsublite": "1.8.2",
+ "spanner": "1.77.0",
+ "storage": "1.51.0",
+ "vertexai": "0.13.3"
+}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
new file mode 100644
index 000000000..58620ecb2
--- /dev/null
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -0,0 +1,156 @@
+{
+ "accessapproval": "1.8.5",
+ "accesscontextmanager": "1.9.5",
+ "advisorynotifications": "1.5.4",
+ "ai": "0.10.1",
+ "aiplatform": "1.78.0",
+ "alloydb": "1.15.0",
+ "analytics": "0.27.1",
+ "apigateway": "1.7.5",
+ "apigeeconnect": "1.7.5",
+ "apigeeregistry": "0.9.5",
+ "apihub": "0.1.4",
+ "apikeys": "1.2.5",
+ "appengine": "1.9.5",
+ "apphub": "0.2.4",
+ "apps": "0.7.1",
+ "area120": "0.9.5",
+ "artifactregistry": "1.16.3",
+ "asset": "1.20.5",
+ "assuredworkloads": "1.12.5",
+ "automl": "1.14.6",
+ "backupdr": "1.3.1",
+ "baremetalsolution": "1.3.5",
+ "batch": "1.12.1",
+ "beyondcorp": "1.1.5",
+ "billing": "1.20.3",
+ "binaryauthorization": "1.9.4",
+ "certificatemanager": "1.9.4",
+ "channel": "1.19.4",
+ "chat": "0.12.1",
+ "cloudbuild": "1.22.1",
+ "cloudcontrolspartner": "1.3.1",
+ "clouddms": "1.8.6",
+ "cloudprofiler": "0.4.4",
+ "cloudquotas": "1.3.2",
+ "cloudtasks": "1.13.5",
+ "commerce": "1.2.3",
+ "compute": "1.35.0",
+ "compute/metadata": "0.6.0",
+ "confidentialcomputing": "1.9.1",
+ "config": "1.3.1",
+ "contactcenterinsights": "1.17.2",
+ "container": "1.42.3",
+ "containeranalysis": "0.13.4",
+ "datacatalog": "1.25.0",
+ "dataflow": "0.10.5",
+ "dataform": "0.11.1",
+ "datafusion": "1.8.5",
+ "datalabeling": "0.9.5",
+ "dataplex": "1.23.1",
+ "dataproc": "2.11.1",
+ "dataqna": "0.9.5",
+ "datastream": "1.13.2",
+ "deploy": "1.26.3",
+ "developerconnect": "0.3.2",
+ "dialogflow": "1.68.1",
+ "discoveryengine": "1.16.2",
+ "dlp": "1.22.0",
+ "documentai": "1.36.0",
+ "domains": "0.10.5",
+ "edgecontainer": "1.4.2",
+ "edgenetwork": "1.2.4",
+ "essentialcontacts": "1.7.5",
+ "eventarc": "1.15.4",
+ "filestore": "1.10.1",
+ "financialservices": "0.1.1",
+ "functions": "1.19.4",
+ "gkebackup": "1.6.4",
+ "gkeconnect": "0.12.3",
+ "gkehub": "0.15.5",
+ "gkemulticloud": "1.5.2",
+ "grafeas": "0.3.15",
+ "gsuiteaddons": "1.7.6",
+ "iam": "1.4.2",
+ "iap": "1.10.5",
+ "identitytoolkit": "0.2.4",
+ "ids": "1.5.5",
+ "iot": "1.8.5",
+ "kms": "1.21.1",
+ "language": "1.14.4",
+ "lifesciences": "0.10.5",
+ "longrunning": "0.6.6",
+ "managedidentities": "1.7.5",
+ "managedkafka": "0.5.0",
+ "maps": "1.20.1",
+ "mediatranslation": "0.9.5",
+ "memcache": "1.11.5",
+ "memorystore": "0.2.1",
+ "metastore": "1.14.5",
+ "migrationcenter": "1.1.4",
+ "modelarmor": "0.1.0",
+ "monitoring": "1.24.1",
+ "netapp": "1.7.1",
+ "networkconnectivity": "1.16.3",
+ "networkmanagement": "1.18.2",
+ "networksecurity": "0.10.5",
+ "networkservices": "0.2.4",
+ "notebooks": "1.12.5",
+ "optimization": "1.7.5",
+ "oracledatabase": "0.3.0",
+ "orchestration": "1.11.7",
+ "orgpolicy": "1.14.3",
+ "osconfig": "1.14.4",
+ "oslogin": "1.14.5",
+ "parallelstore": "0.10.1",
+ "parametermanager": "0.1.1",
+ "phishingprotection": "0.9.5",
+ "policysimulator": "0.3.5",
+ "policytroubleshooter": "1.11.5",
+ "privatecatalog": "0.10.6",
+ "privilegedaccessmanager": "0.2.4",
+ "rapidmigrationassessment": "1.1.5",
+ "recaptchaenterprise": "2.20.2",
+ "recommendationengine": "0.9.5",
+ "recommender": "1.13.4",
+ "redis": "1.18.1",
+ "resourcemanager": "1.10.5",
+ "retail": "1.19.3",
+ "run": "1.9.2",
+ "scheduler": "1.11.6",
+ "secretmanager": "1.14.6",
+ "securesourcemanager": "1.3.2",
+ "security": "1.18.4",
+ "securitycenter": "1.36.1",
+ "securitycentermanagement": "1.1.4",
+ "securityposture": "0.2.4",
+ "servicecontrol": "1.14.4",
+ "servicedirectory": "1.12.5",
+ "servicehealth": "1.2.2",
+ "servicemanagement": "1.10.5",
+ "serviceusage": "1.9.5",
+ "shell": "1.8.5",
+ "shopping": "0.18.0",
+ "speech": "1.26.1",
+ "storageinsights": "1.1.5",
+ "storagetransfer": "1.12.3",
+ "streetview": "0.2.4",
+ "support": "1.1.5",
+ "talent": "1.8.2",
+ "telcoautomation": "1.1.4",
+ "texttospeech": "1.11.2",
+ "tpu": "1.8.2",
+ "trace": "1.11.5",
+ "translate": "1.12.4",
+ "video": "1.23.4",
+ "videointelligence": "1.12.5",
+ "vision": "2.9.4",
+ "visionai": "0.4.4",
+ "vmmigration": "1.8.5",
+ "vmwareengine": "1.3.4",
+ "vpcaccess": "1.8.5",
+ "webrisk": "1.10.5",
+ "websecurityscanner": "1.7.5",
+ "workflows": "1.14.0",
+ "workstations": "1.1.4"
+}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
new file mode 100644
index 000000000..441e0825c
--- /dev/null
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -0,0 +1,3 @@
+{
+ ".": "0.120.0"
+}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
new file mode 100644
index 000000000..28a3338a6
--- /dev/null
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -0,0 +1,2716 @@
+# Changes
+
+
+
+## [0.120.0](https://github.com/googleapis/google-cloud-go/compare/v0.119.0...v0.120.0) (2025-03-20)
+
+
+### Features
+
+* **civil:** Implement database/sql.Scanner|Valuer ([#1145](https://github.com/googleapis/google-cloud-go/issues/1145)) ([#11808](https://github.com/googleapis/google-cloud-go/issues/11808)) ([cbe4419](https://github.com/googleapis/google-cloud-go/commit/cbe4419c17f677c05f3f52c2080861adce705db4))
+
+
+### Bug Fixes
+
+* **third_party/pkgsite:** Increase comment size limit ([#11877](https://github.com/googleapis/google-cloud-go/issues/11877)) ([587b5cc](https://github.com/googleapis/google-cloud-go/commit/587b5ccc684ad99cb9eeba897304b7143564d423))
+
+## [0.119.0](https://github.com/googleapis/google-cloud-go/compare/v0.118.3...v0.119.0) (2025-03-11)
+
+
+### Features
+
+* **main:** Add support for listening on custom host to internal/testutil ([#11780](https://github.com/googleapis/google-cloud-go/issues/11780)) ([9608a09](https://github.com/googleapis/google-cloud-go/commit/9608a09a5d41778c7bb93792b5d5128d7081d4a6)), refs [#11586](https://github.com/googleapis/google-cloud-go/issues/11586)
+
+## [0.118.3](https://github.com/googleapis/google-cloud-go/compare/v0.118.2...v0.118.3) (2025-02-20)
+
+
+### Bug Fixes
+
+* **main:** Bump github.com/envoyproxy/go-control-plane/envoy to v1.32.4 ([#11591](https://github.com/googleapis/google-cloud-go/issues/11591)) ([d52451a](https://github.com/googleapis/google-cloud-go/commit/d52451aa22fb7120e37b43161d3d3103c19e5943))
+
+## [0.118.2](https://github.com/googleapis/google-cloud-go/compare/v0.118.1...v0.118.2) (2025-02-06)
+
+
+### Bug Fixes
+
+* **internal/godocfx:** Don't save timestamps until modules are successfully processed ([#11563](https://github.com/googleapis/google-cloud-go/issues/11563)) ([8f38b3d](https://github.com/googleapis/google-cloud-go/commit/8f38b3d912354027c30977b5adc928e0c6eff7a9))
+* **internal/godocfx:** Retry go get with explicit envoy dependency ([#11564](https://github.com/googleapis/google-cloud-go/issues/11564)) ([a06a6a5](https://github.com/googleapis/google-cloud-go/commit/a06a6a5542939b6239e1ec2c944eb1aae56745d9))
+
+## [0.118.1](https://github.com/googleapis/google-cloud-go/compare/v0.118.0...v0.118.1) (2025-01-30)
+
+
+### Bug Fixes
+
+* **main:** Remove OpenCensus dependency ([6243d91](https://github.com/googleapis/google-cloud-go/commit/6243d910b2bb502211d8308f9cc7723829d9f844))
+
+## [0.118.0](https://github.com/googleapis/google-cloud-go/compare/v0.117.0...v0.118.0) (2025-01-02)
+
+
+### Features
+
+* **civil:** Add AddMonths, AddYears and Weekday methods to Date ([#11340](https://github.com/googleapis/google-cloud-go/issues/11340)) ([d45f1a0](https://github.com/googleapis/google-cloud-go/commit/d45f1a01ebff868418aa14fe762ef7d1334f797d))
+
+## [0.117.0](https://github.com/googleapis/google-cloud-go/compare/v0.116.0...v0.117.0) (2024-12-16)
+
+
+### Features
+
+* **internal/trace:** Remove previously deprecated OpenCensus support ([#11230](https://github.com/googleapis/google-cloud-go/issues/11230)) ([40cf125](https://github.com/googleapis/google-cloud-go/commit/40cf1251c9d73be435585ce204a63588446c72b1)), refs [#10287](https://github.com/googleapis/google-cloud-go/issues/10287)
+* **transport:** Remove deprecated EXPERIMENTAL OpenCensus trace context propagation ([#11239](https://github.com/googleapis/google-cloud-go/issues/11239)) ([0d1ac87](https://github.com/googleapis/google-cloud-go/commit/0d1ac87174ed8526ea47d71a80e641ffbd687a6c)), refs [#10287](https://github.com/googleapis/google-cloud-go/issues/10287) [#11230](https://github.com/googleapis/google-cloud-go/issues/11230)
+
+## [0.116.0](https://github.com/googleapis/google-cloud-go/compare/v0.115.1...v0.116.0) (2024-10-09)
+
+
+### Features
+
+* **genai:** Add tokenizer package ([#10699](https://github.com/googleapis/google-cloud-go/issues/10699)) ([214af16](https://github.com/googleapis/google-cloud-go/commit/214af1604bf3837f68e96dbf81c1331b90c9375f))
+
+## [0.115.1](https://github.com/googleapis/google-cloud-go/compare/v0.115.0...v0.115.1) (2024-08-13)
+
+
+### Bug Fixes
+
+* **cloud.google.com/go:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [0.115.0](https://github.com/googleapis/google-cloud-go/compare/v0.114.0...v0.115.0) (2024-06-12)
+
+
+### Features
+
+* **internal/trace:** Deprecate OpenCensus support ([#10287](https://github.com/googleapis/google-cloud-go/issues/10287)) ([430ce8a](https://github.com/googleapis/google-cloud-go/commit/430ce8adea2d0be43461e2ca783b7c17794e983f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205) [#8655](https://github.com/googleapis/google-cloud-go/issues/8655)
+
+
+### Bug Fixes
+
+* **internal/postprocessor:** Use approved image tag ([#10341](https://github.com/googleapis/google-cloud-go/issues/10341)) ([a388fe5](https://github.com/googleapis/google-cloud-go/commit/a388fe5cf075d0af986861c70dcb7b9f97c31019))
+
+## [0.114.0](https://github.com/googleapis/google-cloud-go/compare/v0.113.0...v0.114.0) (2024-05-23)
+
+
+### Features
+
+* **civil:** Add Compare method to Date, Time, and DateTime ([#10193](https://github.com/googleapis/google-cloud-go/issues/10193)) ([c2920d7](https://github.com/googleapis/google-cloud-go/commit/c2920d7c9007a11d9232c628fba5496197deeba4))
+
+
+### Bug Fixes
+
+* **internal/postprocessor:** Add scopes to all appropriate commit lines ([#10192](https://github.com/googleapis/google-cloud-go/issues/10192)) ([c21399b](https://github.com/googleapis/google-cloud-go/commit/c21399bdc362c6c646c2c0f8c2c55903898e0eab))
+
+## [0.113.0](https://github.com/googleapis/google-cloud-go/compare/v0.112.2...v0.113.0) (2024-05-08)
+
+
+### Features
+
+* **civil:** Add Compare method to Date, Time, and DateTime ([#10010](https://github.com/googleapis/google-cloud-go/issues/10010)) ([34455c1](https://github.com/googleapis/google-cloud-go/commit/34455c15d62b089f3281ff4c663245e72b257f37))
+
+
+### Bug Fixes
+
+* **all:** Bump x/net to v0.24.0 ([#10000](https://github.com/googleapis/google-cloud-go/issues/10000)) ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+* **debugger:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90))
+* **internal/aliasfix:** Handle import paths correctly ([#10097](https://github.com/googleapis/google-cloud-go/issues/10097)) ([fafaf0d](https://github.com/googleapis/google-cloud-go/commit/fafaf0d0a293096559a4655ea61062cb896f1568))
+* **rpcreplay:** Properly unmarshal dynamic message ([#9774](https://github.com/googleapis/google-cloud-go/issues/9774)) ([53ccb20](https://github.com/googleapis/google-cloud-go/commit/53ccb20d925ccb00f861958d9658b55738097dc6)), refs [#9773](https://github.com/googleapis/google-cloud-go/issues/9773)
+
+
+### Documentation
+
+* **testing:** Switch deprecated WithInsecure to WithTransportCredentials ([#10091](https://github.com/googleapis/google-cloud-go/issues/10091)) ([2b576ab](https://github.com/googleapis/google-cloud-go/commit/2b576abd1c3bfca2f962de0e024524f72d3652c0))
+
+## [0.112.2](https://github.com/googleapis/google-cloud-go/compare/v0.112.1...v0.112.2) (2024-03-27)
+
+
+### Bug Fixes
+
+* **all:** Release protobuf dep bump ([#9586](https://github.com/googleapis/google-cloud-go/issues/9586)) ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [0.112.1](https://github.com/googleapis/google-cloud-go/compare/v0.112.0...v0.112.1) (2024-02-26)
+
+
+### Bug Fixes
+
+* **internal/postprocessor:** Handle googleapis link in commit body ([#9251](https://github.com/googleapis/google-cloud-go/issues/9251)) ([1dd3515](https://github.com/googleapis/google-cloud-go/commit/1dd35157bff871a2b3e5b0e3cac33502737fd631))
+
+
+### Documentation
+
+* **main:** Add OpenTelemetry-Go compatibility warning to debug.md ([#9268](https://github.com/googleapis/google-cloud-go/issues/9268)) ([18f9bb9](https://github.com/googleapis/google-cloud-go/commit/18f9bb94fbc239255a873b29462fc7c2eac3c0aa)), refs [#9267](https://github.com/googleapis/google-cloud-go/issues/9267)
+
+## [0.112.0](https://github.com/googleapis/google-cloud-go/compare/v0.111.0...v0.112.0) (2024-01-11)
+
+
+### Features
+
+* **internal/trace:** Export internal/trace package constants and vars ([#9242](https://github.com/googleapis/google-cloud-go/issues/9242)) ([941c16f](https://github.com/googleapis/google-cloud-go/commit/941c16f3a2602e9bdc737b139060a7dd8318f9dd))
+
+
+### Documentation
+
+* **main:** Add telemetry discussion to debug.md ([#9074](https://github.com/googleapis/google-cloud-go/issues/9074)) ([90ed12e](https://github.com/googleapis/google-cloud-go/commit/90ed12e1dffe722b42f58556f0e17b808da9714d)), refs [#8655](https://github.com/googleapis/google-cloud-go/issues/8655)
+
+## [0.111.0](https://github.com/googleapis/google-cloud-go/compare/v0.110.10...v0.111.0) (2023-11-29)
+
+
+### Features
+
+* **internal/trace:** Add OpenTelemetry support ([#8655](https://github.com/googleapis/google-cloud-go/issues/8655)) ([7a46b54](https://github.com/googleapis/google-cloud-go/commit/7a46b5428f239871993d66be2c7c667121f60a6f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205)
+
+
+### Bug Fixes
+
+* **all:** Bump google.golang.org/api to v0.149.0 ([#8959](https://github.com/googleapis/google-cloud-go/issues/8959)) ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
+
+## [0.110.10](https://github.com/googleapis/google-cloud-go/compare/v0.110.9...v0.110.10) (2023-10-31)
+
+
+### Bug Fixes
+
+* **all:** Update grpc-go to v1.56.3 ([#8916](https://github.com/googleapis/google-cloud-go/issues/8916)) ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
+* **all:** Update grpc-go to v1.59.0 ([#8922](https://github.com/googleapis/google-cloud-go/issues/8922)) ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
+* **internal/godocfx:** Fix links to other packages in summary ([#8756](https://github.com/googleapis/google-cloud-go/issues/8756)) ([6220a9a](https://github.com/googleapis/google-cloud-go/commit/6220a9afeb89df3080e9e663e97648939fd4e15f))
+
+## [0.110.9](https://github.com/googleapis/google-cloud-go/compare/v0.110.8...v0.110.9) (2023-10-19)
+
+
+### Bug Fixes
+
+* **all:** Update golang.org/x/net to v0.17.0 ([#8705](https://github.com/googleapis/google-cloud-go/issues/8705)) ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/aliasgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/examples/fake:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/gapicgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/generated/snippets:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/godocfx:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **internal/postprocessor:** Add ability to override release level ([#8643](https://github.com/googleapis/google-cloud-go/issues/8643)) ([26c608a](https://github.com/googleapis/google-cloud-go/commit/26c608a8204d740767dfebf6aa473cdf1873e5f0))
+* **internal/postprocessor:** Add missing assignment ([#8646](https://github.com/googleapis/google-cloud-go/issues/8646)) ([d8c5746](https://github.com/googleapis/google-cloud-go/commit/d8c5746e6dde1bd34c01a9886804f861c88c0cb7))
+* **internal/postprocessor:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+
+## [0.110.8](https://github.com/googleapis/google-cloud-go/compare/v0.110.7...v0.110.8) (2023-09-11)
+
+
+### Documentation
+
+* **postprocessor:** Nudge users towards stable clients ([#8513](https://github.com/googleapis/google-cloud-go/issues/8513)) ([05a1484](https://github.com/googleapis/google-cloud-go/commit/05a1484b0752aaa3d6a164d37686d6de070cc78d))
+
+## [0.110.7](https://github.com/googleapis/google-cloud-go/compare/v0.110.6...v0.110.7) (2023-07-31)
+
+
+### Bug Fixes
+
+* **main:** Add more docs to base package ([c401ab4](https://github.com/googleapis/google-cloud-go/commit/c401ab4a576c64ab2b8840a90f7ccd5d031cea57))
+
+## [0.110.6](https://github.com/googleapis/google-cloud-go/compare/v0.110.5...v0.110.6) (2023-07-13)
+
+
+### Bug Fixes
+
+* **httpreplay:** Ignore GCS header by default ([#8260](https://github.com/googleapis/google-cloud-go/issues/8260)) ([b961a1a](https://github.com/googleapis/google-cloud-go/commit/b961a1abe7aeafe420c88eed38035fed0bbf7bbe)), refs [#8233](https://github.com/googleapis/google-cloud-go/issues/8233)
+
+## [0.110.5](https://github.com/googleapis/google-cloud-go/compare/v0.110.4...v0.110.5) (2023-07-07)
+
+
+### Bug Fixes
+
+* **logadmin:** Use consistent filter in paging example ([#8221](https://github.com/googleapis/google-cloud-go/issues/8221)) ([9570159](https://github.com/googleapis/google-cloud-go/commit/95701597b1d709543ea22a4b6ff9b28b14a2d4fc))
+
+## [0.110.4](https://github.com/googleapis/google-cloud-go/compare/v0.110.3...v0.110.4) (2023-07-05)
+
+
+### Bug Fixes
+
+* **internal/retry:** Simplify gRPC status code mapping of retry error ([#8196](https://github.com/googleapis/google-cloud-go/issues/8196)) ([e8b224a](https://github.com/googleapis/google-cloud-go/commit/e8b224a3bcb0ca9430990ef6ae8ddb7b60f5225d))
+
+## [0.110.3](https://github.com/googleapis/google-cloud-go/compare/v0.110.2...v0.110.3) (2023-06-23)
+
+
+### Bug Fixes
+
+* **internal/retry:** Never return nil from GRPCStatus() ([#8128](https://github.com/googleapis/google-cloud-go/issues/8128)) ([005d2df](https://github.com/googleapis/google-cloud-go/commit/005d2dfb6b68bf5a35bfb8db449d3f0084b34d6e))
+
+
+### Documentation
+
+* **v1:** Minor clarifications for TaskGroup and min_cpu_platform ([3382ef8](https://github.com/googleapis/google-cloud-go/commit/3382ef81b6bcefe1c7bfc14aa5ff9bbf25850966))
+
+## [0.110.2](https://github.com/googleapis/google-cloud-go/compare/v0.110.1...v0.110.2) (2023-05-08)
+
+
+### Bug Fixes
+
+* **deps:** Update grpc to v1.55.0 ([#7885](https://github.com/googleapis/google-cloud-go/issues/7885)) ([9fc48a9](https://github.com/googleapis/google-cloud-go/commit/9fc48a921428c94c725ea90415d55ff0c177dd81))
+
+## [0.110.1](https://github.com/googleapis/google-cloud-go/compare/v0.110.0...v0.110.1) (2023-05-03)
+
+
+### Bug Fixes
+
+* **httpreplay:** Add ignore-header flag, fix tests ([#7865](https://github.com/googleapis/google-cloud-go/issues/7865)) ([1829706](https://github.com/googleapis/google-cloud-go/commit/1829706c5ade36cc786b2e6780fda5e7302f965b))
+
+## [0.110.0](https://github.com/googleapis/google-cloud-go/compare/v0.109.0...v0.110.0) (2023-02-15)
+
+
+### Features
+
+* **internal/postprocessor:** Detect and initialize new modules ([#7288](https://github.com/googleapis/google-cloud-go/issues/7288)) ([59ce02c](https://github.com/googleapis/google-cloud-go/commit/59ce02c13f265741a8f1f0f7ad5109bf83e3df82))
+* **internal/postprocessor:** Only regen snippets for changed modules ([#7300](https://github.com/googleapis/google-cloud-go/issues/7300)) ([220f8a5](https://github.com/googleapis/google-cloud-go/commit/220f8a5ad2fd64b75c5a1af531b1ab4597cf17d7))
+
+
+### Bug Fixes
+
+* **internal/postprocessor:** Add scopes without OwlBot api-name feature ([#7404](https://github.com/googleapis/google-cloud-go/issues/7404)) ([f7fe4f6](https://github.com/googleapis/google-cloud-go/commit/f7fe4f68ebf2ca28efd282f3419329dd2c09d245))
+* **internal/postprocessor:** Include module and package in scope ([#7294](https://github.com/googleapis/google-cloud-go/issues/7294)) ([d2c5c84](https://github.com/googleapis/google-cloud-go/commit/d2c5c8449f6939301f0fd506282e8fc73fc84f96))
+
+## [0.109.0](https://github.com/googleapis/google-cloud-go/compare/v0.108.0...v0.109.0) (2023-01-18)
+
+
+### Features
+
+* **internal/postprocessor:** Make OwlBot postprocessor ([#7202](https://github.com/googleapis/google-cloud-go/issues/7202)) ([7a1022e](https://github.com/googleapis/google-cloud-go/commit/7a1022e215261d679c8496cdd35a9cad1f13e527))
+
+## [0.108.0](https://github.com/googleapis/google-cloud-go/compare/v0.107.0...v0.108.0) (2023-01-05)
+
+
+### Features
+
+* **all:** Enable REGAPIC and REST numeric enums ([#6999](https://github.com/googleapis/google-cloud-go/issues/6999)) ([28f3572](https://github.com/googleapis/google-cloud-go/commit/28f3572addb0f563a2a42a76977b4e083191613f))
+* **debugger:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** Disable rest for non-rest APIs ([#7157](https://github.com/googleapis/google-cloud-go/issues/7157)) ([ab332ce](https://github.com/googleapis/google-cloud-go/commit/ab332ced06f6c07909444e4528c02a8b6a0a70a6))
+
+## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15)
+
+
+### Features
+
+* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) ([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b))
+
+## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09)
+
+
+### Features
+
+* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
+
+## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24)
+
+
+### Features
+
+* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490))
+
+## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29)
+
+
+### Features
+
+* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68))
+
+## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17)
+
+
+### Bug Fixes
+
+* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35))
+
+## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24)
+
+
+### Features
+
+* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187))
+
+### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03)
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b))
+
+## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20)
+
+
+### Features
+
+* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682))
+* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095))
+* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b))
+* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672))
+* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8))
+* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059))
+
+## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04)
+
+
+### Features
+
+* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
+* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
+* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a))
+
+
+### Bug Fixes
+
+* **artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
+* **profiler:** refine regular expression for parsing backoff duration in E2E tests ([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043))
+* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7))
+
+## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06)
+
+
+### Features
+
+* **dialogflow/cx:** added `TelephonyTransferCall` in response message ([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774))
+
+## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03)
+
+
+### Features
+
+* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
+* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5))
+* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
+* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
+* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
+* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **domains:** added library for Cloud Domains v1 API. Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
+* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
+* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **kms:** add support for Raw PKCS[#1](https://www.github.com/googleapis/google-cloud-go/issues/1) signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353))
+* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8))
+* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78))
+* **osconfig:** OSConfig: add OS policy assignment rpcs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **osconfig:** Update OSConfig API ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
+* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d))
+* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
+* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
+* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding w/ external system metadata. External systems are a child resource under finding, and are housed on the finding itself, and can also be filtered on in Notifications, the ListFindings and GroupFindings API ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
+* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03))
+* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
+* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
+* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occured feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
+
+
+### Bug Fixes
+
+* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
+* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062)
+* **compute:** make parent_id fields required compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
+* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
+* **internal/gapicgen:** fix a compute filtering ([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99))
+* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47))
+* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) ([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2))
+* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e))
+* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363))
+* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da))
+* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
+* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
+
+## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29)
+
+
+### Features
+
+* **internal** add Retry func to testutil from samples repository [#4902](https://github.com/googleapis/google-cloud-go/pull/4902)
+
+## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28)
+
+
+### Features
+
+* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727)
+* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2))
+* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** restore fmting proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933))
+* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184))
+
+## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21)
+
+### Bug Fixes
+
+* **internal/gapicgen:** add a temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da))
+* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa))
+
+### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02)
+
+
+### Bug Fixes
+
+* **compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713)
+
+## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31)
+
+
+### Features
+
+* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b))
+* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082))
+* **asset:** Release of relationships in v1, Add content type Relationship to support relationship export Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642)
+* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e))
+* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510))
+* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6))
+* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
+* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b))
+* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId is ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
+* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+
+
+### Bug Fixes
+
+* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
+
+## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16)
+
+
+### Features
+
+* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061)
+* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294)
+* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083))
+
+## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11)
+
+
+### Features
+
+* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6))
+* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
+* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
+* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list avaliable locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+* **dialogflow:** expose `Locations` service to get/list avaliable locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
+
+
+### Bug Fixes
+
+* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe))
+
+## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03)
+
+
+### ⚠ BREAKING CHANGES
+
+* **compute:** add pagination and an Operation wrapper (#4542)
+
+### Features
+
+* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e))
+* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a))
+* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437))
+
+
+### Bug Fixes
+
+* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352))
+
+## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29)
+
+
+### Features
+
+* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
+* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
+* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63))
+* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
+* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a))
+* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434)
+
+## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22)
+
+
+### ⚠ BREAKING CHANGES
+
+* **cloudbuild/apiv1:** Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported.
+
+### Features
+
+* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
+* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
+* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
+* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957))
+
+
+### Bug Fixes
+
+* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0))
+* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447)
+* **kms:** Updating WORKSPACE files to use the newest version of the Typescript generator. ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
+
+## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13)
+
+
+### Features
+
+* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
+* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567))
+* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
+* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
+* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
+
+
+### Bug Fixes
+
+* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167))
+* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391))
+
+
+### Miscellaneous Chores
+
+* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15))
+
+## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01)
+
+
+### Features
+
+* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5))
+
+## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30)
+
+
+### Features
+
+* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
+* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
+* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
+* **documentai:** update document.proto, add the processor management methods. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
+* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
+* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
+* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00))
+* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5))
+* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6))
+* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
+* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
+
+
+### Bug Fixes
+
+* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260)
+* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41))
+* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
+* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
+
+## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09)
+
+
+### Features
+
+* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
+* **apigeeconnect:** start generating abiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
+* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b))
+* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2))
+* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
+* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b))
+* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345))
+* **lifesciences:** strat generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
+* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b))
+
+
+### Bug Fixes
+
+* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a))
+* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
+
+## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02)
+
+
+### Features
+
+* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
+* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda))
+* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232))
+* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
+* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834))
+* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
+* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098))
+* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
+* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
+
+## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17)
+
+
+### Features
+
+* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
+* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
+* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b))
+* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
+* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763))
+* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
+* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
+* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
+* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
+* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
+* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540))
+* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
+* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c))
+* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4))
+* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c))
+* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481))
+* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38))
+* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808))
+* **metastore:** start generateing apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310))
+* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f))
+* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
+* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
+* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
+* **speech:** Support for spoken punctuation and spoken emojis. ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
+
+
+### Bug Fixes
+
+* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
+* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e))
+* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25))
+* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5))
+* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34))
+* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037)
+* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
+
+
+### Miscellaneous Chores
+
+* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7))
+
+## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02)
+
+
+### Features
+
+* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
+* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4))
+* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
+* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
+* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
+
+
+### Bug Fixes
+
+* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678))
+
+## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23)
+
+
+### ⚠ BREAKING CHANGES
+
+* **all:** This is a breaking change in dialogflow
+
+### Features
+
+* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634)
+* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61))
+* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf))
+* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57))
+* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f))
+* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
+* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee))
+
+
+### Miscellaneous Chores
+
+* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
+
+## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10)
+
+
+### Features
+
+* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f))
+* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
+* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
+* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441))
+* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25))
+* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e))
+* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548))
+* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650))
+* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4))
+* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
+
+
+### Bug Fixes
+
+* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
+* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
+* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495))
+* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80))
+* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef))
+* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c))
+* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e))
+
+## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22)
+
+
+### Features
+
+* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
+* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+
+
+### Bug Fixes
+
+* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
+* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
+
+## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16)
+
+
+### Features
+
+* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04))
+* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
+* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2))
+* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418))
+* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f))
+* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac))
+
+
+### Bug Fixes
+
+* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **errorreporting:** Update bazel builds for ER client libraries. ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd))
+* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
+* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
+* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363))
+* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
+
+## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)
+
+
+### Features
+
+* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db))
+* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d))
+* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
+* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e))
+* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
+* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
+* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **appengine:** start generating apiv1 ([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7))
+* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3))
+* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
+* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf))
+* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5))
+* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
+* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
+* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
+* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102))
+* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149))
+* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b))
+* **internal/gapicgen:** change commit formatting to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e))
+* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af))
+* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279))
+* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359))
+* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f))
+* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531))
+* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
+* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8))
+* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
+* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60))
+* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
+* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
+* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a))
+* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db))
+
+
+### Bug Fixes
+
+* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
+* **internal/godocfx:** add TOC element for module root package ([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a))
+* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71))
+
+## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448)
+* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a))
+
+## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374)
+* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84))
+* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba))
+
+
+### Bug Fixes
+
+* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5))
+
+## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) [#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199)
+* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff))
+* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be))
+* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999))
+* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a))
+
+
+### Bug Fixes
+
+* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0))
+
+## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119)
+
+
+### Bug Fixes
+
+* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44))
+* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a))
+
+
+## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044)
+* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3))
+* **internal:** auto-run godocfx on new mods ([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42))
+* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc))
+* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171))
+* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6))
+* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2))
+* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900))
+
+
+### Bug Fixes
+
+* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb))
+* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7))
+
+## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025)
+* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44))
+* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714))
+
+## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14)
+
+This is an empty release that was created solely to aid in pubsublite's module
+carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14)
+
+
+### Features
+
+* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045))
+* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958)
+* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33))
+* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf))
+* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b))
+* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e))
+* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85))
+
+
+### Bug Fixes
+
+* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0))
+
+## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935)
+
+## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) [#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864)
+* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568))
+* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72))
+* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830))
+* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e))
+* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61))
+
+
+### Bug Fixes
+
+* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d))
+* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7))
+* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883)
+
+## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781)
+* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93))
+* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521))
+* **cloudbuild:** Start generating apiv1/v3 ([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64))
+* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5))
+* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893))
+* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4))
+* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901))
+* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580))
+
+
+### Bug Fixes
+
+* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639))
+* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c))
+* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835)
+
+
+### Reverts
+
+* **cloudbuild): "feat(cloudbuild:** Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557))
+
+## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27)
+
+
+### Announcements
+
+The following changes will be included in an upcoming release and are not
+included in this one.
+
+#### Default Deadlines
+
+By default, non-streaming methods, like Create or Get methods, will have a
+default deadline applied to the context provided at call time, unless a context
+deadline is already set. Streaming methods have no default deadline and will run
+indefinitely, unless the context provided at call time contains a deadline.
+
+To opt-out of this behavior, set the environment variable
+`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to
+initializing a client. This opt-out mechanism will be removed in a later
+release, with a notice similar to this one ahead of its removal.
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764)
+
+
+### Bug Fixes
+
+* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1))
+* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d))
+
+## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18)
+
+
+### Features
+
+* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706)
+* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e))
+
+## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05)
+
+
+### Features
+
+* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04))
+* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c))
+* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b))
+* **gaming:** start generate apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601))
+* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32))
+
+## v0.62.0
+
+### Announcements
+
+- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was
+ merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606).
+ This fixed a broken API response for `DiagnoseCluster`. When polling on the
+ Long Running Operation(LRO), the API now returns
+ `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an
+ `error` before.
+
+### Changes
+
+- all:
+ - Updated all direct dependencies.
+ - Updated contributing guidelines to suggest allowing edits from maintainers.
+- billing/budgets:
+ - Start generating client for apiv1beta1.
+- functions:
+ - Start generating client for apiv1.
+- notebooks:
+ - Start generating client apiv1beta1.
+- profiler:
+ - update proftest to support parsing floating-point backoff durations.
+ - Fix the regexp used to parse backoff duration.
+- Various updates to autogenerated clients.
+
+## v0.61.0
+
+### Changes
+
+- all:
+ - Update all direct dependencies.
+- dashboard:
+ - Start generating client for apiv1.
+- policytroubleshooter:
+ - Start generating client for apiv1.
+- profiler:
+ - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`.
+- Various updates to autogenerated clients.
+
+## v0.60.0
+
+### Changes
+
+- all:
+ - Refactored examples to reduce module dependencies.
+ - Update sub-modules to use cloud.google.com/go v0.59.0.
+- internal:
+ - Start generating client for gaming apiv1beta.
+- Various updates to autogenerated clients.
+
+## v0.59.0
+
+### Announcements
+
+goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our
+contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept
+pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to
+point to GitHub.
+
+### Changes
+
+- all:
+ - Remove dependency on honnef.co/go/tools.
+ - Update our contributing instructions now that we use GitHub for reviews.
+ - Remove some un-inclusive terminology.
+- compute/metadata:
+ - Pass cancelable context to DNS lookup.
+- .github:
+ - Update templates issue/PR templates.
+- internal:
+ - Bump several clients to GA.
+ - Fix GoDoc badge source.
+ - Several automation changes related to the move to GitHub.
+ - Start generating a client for asset v1p5beta1.
+- Various updates to autogenerated clients.
+
+## v0.58.0
+
+### Deprecation notice
+
+- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking
+ changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`.
+
+### Changes
+
+- all:
+ - The remaining uses of gtransport.Dial have been removed.
+ - The `genproto` dependency has been updated to a version that makes use of
+ new `protoreflect` library. For more information on these protobuf changes
+ please see the following post from the official Go blog:
+ https://blog.golang.org/protobuf-apiv2.
+- internal:
+ - Started generation of datastore admin v1 client.
+ - Updated protofuf version used for generation to 3.12.X.
+ - Update the release levels for several APIs.
+ - Generate clients with protoc-gen-go@v1.4.1.
+- monitoring:
+ - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation
+ notice above).
+- profiler:
+ - Fixed flakiness in tests.
+- Various updates to autogenerated clients.
+
+## v0.57.0
+
+- all:
+ - Update module dependency `google.golang.org/api` to `v0.21.0`.
+- errorreporting:
+ - Add exported SetGoogleClientInfo wrappers to manual file.
+- expr/v1alpha1:
+ - Deprecate client. This client will be removed in a future release.
+- internal:
+ - Fix possible data race in TestTracer.
+ - Pin versions of tools used for generation.
+ - Correct the release levels for BigQuery APIs.
+ - Start generation osconfig v1.
+- longrunning:
+ - Add exported SetGoogleClientInfo wrappers to manual file.
+- monitoring:
+ - Stop generation of monitoring/apiv3 because of incoming breaking change.
+- trace:
+ - Add exported SetGoogleClientInfo wrappers to manual file.
+- Various updates to autogenerated clients.
+
+## v0.56.0
+
+- secretmanager:
+ - add IAM helper
+- profiler:
+ - try all us-west1 zones for integration tests
+- internal:
+ - add config to generate webrisk v1
+ - add repo and commit to buildcop invocation
+ - add recaptchaenterprise v1 generation config
+ - update microgenerator to v0.12.5
+ - add datacatalog client
+ - start generating security center settings v1beta
+ - start generating osconfig agentendpoint v1
+ - setup generation for bigquery/connection/v1beta1
+- all:
+ - increase continous testing timeout to 45m
+ - various updates to autogenerated clients.
+
+## v0.55.0
+
+- Various updates to autogenerated clients.
+
+## v0.54.0
+
+- all:
+ - remove unused golang.org/x/exp from mod file
+ - update godoc.org links to pkg.go.dev
+- compute/metadata:
+ - use defaultClient when http.Client is nil
+ - remove subscribeClient
+- iam:
+ - add support for v3 policy and IAM conditions
+- Various updates to autogenerated clients.
+
+## v0.53.0
+
+- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers).
+ - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API.
+- profiler: remove symbolization (drops support for go1.10)
+- Various updates to autogenerated clients.
+
+## v0.52.0
+
+- internal/gapicgen: multiple improvements related to library generation.
+- compute/metadata: unset ResponseHeaderTimeout in defaultClient
+- docs: fix link to KMS in README.md
+- Various updates to autogenerated clients.
+
+## v0.51.0
+
+- secretmanager:
+ - add IAM helper for generic resource IAM handle
+- cloudbuild:
+ - migrate to microgen in a major version
+- Various updates to autogenerated clients.
+
+## v0.50.0
+
+- profiler:
+ - Support disabling CPU profile collection.
+ - Log when a profile creation attempt begins.
+- compute/metadata:
+ - Fix panic on malformed URLs.
+ - InstanceName returns actual instance name.
+- Various updates to autogenerated clients.
+
+## v0.49.0
+
+- functions/metadata:
+ - Handle string resources in JSON unmarshaller.
+- Various updates to autogenerated clients.
+
+## v0.48.0
+
+- Various updates to autogenerated clients
+
+## v0.47.0
+
+This release drops support for Go 1.9 and Go 1.10: we continue to officially
+support Go 1.11, Go 1.12, and Go 1.13.
+
+- Various updates to autogenerated clients.
+- Add cloudbuild/apiv1 client.
+
+## v0.46.3
+
+This is an empty release that was created solely to aid in storage's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.2
+
+This is an empty release that was created solely to aid in spanner's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.1
+
+This is an empty release that was created solely to aid in firestore's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.46.0
+
+- spanner:
+ - Retry "Session not found" for read-only transactions.
+ - Retry aborted PDMLs.
+- spanner/spannertest:
+ - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly.
+- storage:
+ - Add HMACKeyOptions.
+ - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL,
+ DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice
+ StorageClasses but they are still acceptable values.
+- trace:
+ - Remove cloud.google.com/go/trace. Package cloud.google.com/go/trace has been
+ marked OBSOLETE for several years: it is now no longer provided. If you
+ relied on this package, please vendor it or switch to using
+ https://cloud.google.com/trace/docs/setup/go (which obsoleted it).
+
+## v0.45.1
+
+This is an empty release that was created solely to aid in pubsub's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.45.0
+
+- compute/metadata:
+ - Add Email method.
+- storage:
+ - Fix duplicated retry logic.
+ - Add ReaderObjectAttrs.StartOffset.
+ - Support reading last N bytes of a file when a negative range is given, such
+ as `obj.NewRangeReader(ctx, -10, -1)`.
+ - Add HMACKey listing functionality.
+- spanner/spannertest:
+ - Support primary keys with no columns.
+ - Fix MinInt64 parsing.
+ - Implement deletion of key ranges.
+ - Handle reads during a read-write transaction.
+ - Handle returning DATE values.
+- pubsub:
+ - Fix Ack/Modack request size calculation.
+- logging:
+ - Add auto-detection of monitored resources on GAE Standard.
+
+## v0.44.3
+
+This is an empty release that was created solely to aid in bigtable's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.44.2
+
+This is an empty release that was created solely to aid in bigquery's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.44.1
+
+This is an empty release that was created solely to aid in datastore's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.44.0
+
+- datastore:
+ - Interface elements whose underlying types are supported, are now supported.
+ - Reduce time to initial retry from 1s to 100ms.
+- firestore:
+ - Add Increment transformation.
+- storage:
+ - Allow emulator with STORAGE_EMULATOR_HOST.
+ - Add methods for HMAC key management.
+- pubsub:
+ - Add PublishCount and PublishLatency measurements.
+ - Add DefaultPublishViews and DefaultSubscribeViews for convenience of
+ importing all views.
+ - Add add Subscription.PushConfig.AuthenticationMethod.
+- spanner:
+ - Allow emulator usage with SPANNER_EMULATOR_HOST.
+ - Add cloud.google.com/go/spanner/spannertest, a spanner emulator.
+ - Add cloud.google.com/go/spanner/spansql which contains types and a parser
+ for the Cloud Spanner SQL dialect.
+- asset:
+ - Add apiv1p2beta1 client.
+
+## v0.43.0
+
+This is an empty release that was created solely to aid in logging's module
+carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
+
+## v0.42.0
+
+- bigtable:
+ - Add an admin method to update an instance and clusters.
+ - Fix bttest regex matching behavior for alternations (things like `|a`).
+ - Expose BlockAllFilter filter.
+- bigquery:
+ - Add Routines API support.
+- storage:
+ - Add read-only Bucket.LocationType.
+- logging:
+ - Add TraceSampled to Entry.
+ - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context.
+- pubsub:
+ - Add Cloud Key Management to TopicConfig.
+ - Change ExpirationPolicy to optional.Duration.
+- automl:
+ - Add apiv1beta1 client.
+- iam:
+ - Fix compilation problem with iam/credentials/apiv1.
+
+## v0.41.0
+
+- bigtable:
+ - Check results from PredicateFilter in bttest, which fixes certain false matches.
+- profiler:
+ - debugLog checks user defined logging options before logging.
+- spanner:
+ - PartitionedUpdates respect query parameters.
+ - StartInstance allows specifying cloud API access scopes.
+- bigquery:
+ - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics.
+- firestore:
+ - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll.
+- replay:
+ - Change references to IPv4 addresses to localhost, making replay compatible with IPv6.
+
+## v0.40.0
+
+- all:
+ - Update to protobuf-golang v1.3.1.
+- datastore:
+ - Attempt to decode GAE-encoded keys if initial decoding attempt fails.
+ - Support integer time conversion.
+- pubsub:
+ - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow,
+ this value should be adjusted higher.
+ - Use IPv6 compatible target in testutil.
+- bigtable:
+ - Fix Latin-1 regexp filters in bttest, allowing \C.
+ - Expose PassAllFilter.
+- profiler:
+ - Add log messages for slow path in start.
+ - Fix start to allow retry until success.
+- firestore:
+ - Add admin client.
+- containeranalysis:
+ - Add apiv1 client.
+- grafeas:
+ - Add apiv1 client.
+
+## 0.39.0
+
+- bigtable:
+ - Implement DeleteInstance in bttest.
+ - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest.
+- bigquery:
+ - Move RequirePartitionFilter outside of TimePartioning.
+ - Expose models API.
+- firestore:
+ - Allow array values in create and update calls.
+ - Add CollectionGroup method.
+- pubsub:
+ - Add ExpirationPolicy to Subscription.
+- storage:
+ - Add V4 signing.
+- rpcreplay:
+ - Match streams by first sent request. This further improves rpcreplay's
+ ability to distinguish streams.
+- httpreplay:
+ - Set up Man-In-The-Middle config only once. This should improve proxy
+ creation when multiple proxies are used in a single process.
+ - Remove error on empty Content-Type, allowing requests with no Content-Type
+ header but a non-empty body.
+- all:
+ - Fix an edge case bug in auto-generated library pagination by properly
+ propagating pagetoken.
+
+## 0.38.0
+
+This update includes a substantial reduction in our transitive dependency list
+by way of updating to opencensus@v0.21.0.
+
+- spanner:
+ - Error implements GRPCStatus, allowing status.Convert.
+- bigtable:
+ - Fix a bug in bttest that prevents single column queries returning results
+ that match other filters.
+ - Remove verbose retry logging.
+- logging:
+ - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and
+ rune replace manually.
+- recaptchaenterprise:
+ - Add v1beta1 client.
+- phishingprotection:
+ - Add v1beta1 client.
+
+## 0.37.4
+
+This patch releases re-builds the go.sum. This was not possible in the
+previous release.
+
+- firestore:
+ - Add sentinel value DetectProjectID for auto-detecting project ID.
+ - Add OpenCensus tracing for public methods.
+ - Marked stable. All future changes come with a backwards compatibility
+ guarantee.
+ - Removed firestore/apiv1beta1. All users relying on this low-level library
+ should migrate to firestore/apiv1. Note that most users should use the
+ high-level firestore package instead.
+- pubsub:
+ - Allow large messages in synchronous pull case.
+ - Cap bundler byte limit. This should prevent OOM conditions when there are
+ a very large number of message publishes occurring.
+- storage:
+ - Add ETag to BucketAttrs and ObjectAttrs.
+- datastore:
+ - Removed some non-sensical OpenCensus traces.
+- webrisk:
+ - Add v1 client.
+- asset:
+ - Add v1 client.
+- cloudtasks:
+ - Add v2 client.
+
+## 0.37.3
+
+This patch release removes github.com/golang/lint from the transitive
+dependency list, resolving `go get -u` problems.
+
+Note: this release intentionally has a broken go.sum. Please use v0.37.4.
+
+## 0.37.2
+
+This patch release is mostly intended to bring in v0.3.0 of
+google.golang.org/api, which fixes a GCF deployment issue.
+
+Note: we had to-date accidentally marked Redis as stable. In this release, we've
+fixed it by downgrading its documentation to alpha, as it is in other languages
+and docs.
+
+- all:
+ - Document context in generated libraries.
+
+## 0.37.1
+
+Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which
+introduces a new oauth2 url.
+
+## 0.37.0
+
+- spanner:
+ - Add BatchDML method.
+ - Reduced initial time between retries.
+- bigquery:
+ - Produce better error messages for InferSchema.
+ - Add logical type control for avro loads.
+ - Add support for the GEOGRAPHY type.
+- datastore:
+ - Add sentinel value DetectProjectID for auto-detecting project ID.
+ - Allow flatten tag on struct pointers.
+ - Fixed a bug that caused queries to panic with invalid queries. Instead they
+ will now return an error.
+- profiler:
+ - Add ability to override GCE zone and instance.
+- pubsub:
+ - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more
+ consistently retry specific error codes based on whether they're idempotent
+ or non-idempotent.
+- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing
+ the Content-Length header to be dropped.
+- iot:
+ - Add new apiv1 client.
+- securitycenter:
+ - Add new apiv1 client.
+- cloudscheduler:
+ - Add new apiv1 client.
+
+## 0.36.0
+
+- spanner:
+ - Reduce minimum retry backoff from 1s to 100ms. This makes time between
+ retries much faster and should improve latency.
+- storage:
+ - Add support for Bucket Policy Only.
+- kms:
+ - Add ResourceIAM helper method.
+ - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM.
+- firestore:
+ - Switch from v1beta1 API to v1 API.
+ - Allow emulator with FIRESTORE_EMULATOR_HOST.
+- bigquery:
+ - Add NumLongTermBytes to Table.
+ - Add TotalBytesProcessedAccuracy to QueryStatistics.
+- irm:
+ - Add new v1alpha2 client.
+- talent:
+ - Add new v4beta1 client.
+- rpcreplay:
+ - Fix connection to work with grpc >= 1.17.
+ - It is now required for an actual gRPC server to be running for Dial to
+ succeed.
+
+## 0.35.1
+
+- spanner:
+ - Adds OpenCensus views back to public API.
+
+## v0.35.0
+
+- all:
+ - Add go.mod and go.sum.
+ - Switch usage of gax-go to gax-go/v2.
+- bigquery:
+ - Fix bug where time partitioning could not be removed from a table.
+ - Fix panic that occurred with empty query parameters.
+- bttest:
+ - Fix bug where deleted rows were returned by ReadRows.
+- bigtable/emulator:
+ - Configure max message size to 256 MiB.
+- firestore:
+ - Allow non-transactional queries in transactions.
+ - Allow StartAt/EndBefore on direct children at any depth.
+ - QuerySnapshotIterator.Stop may be called in an error state.
+ - Fix bug the prevented reset of transaction write state in between retries.
+- functions/metadata:
+ - Make Metadata.Resource a pointer.
+- logging:
+ - Make SpanID available in logging.Entry.
+- metadata:
+ - Wrap !200 error code in a typed err.
+- profiler:
+ - Add function to check if function name is within a particular file in the
+ profile.
+ - Set parent field in create profile request.
+ - Return kubernetes client to start cluster, so client can be used to poll
+ cluster.
+ - Add function for checking if filename is in profile.
+- pubsub:
+ - Fix bug where messages expired without an initial modack in
+ synchronous=true mode.
+ - Receive does not retry ResourceExhausted errors.
+- spanner:
+ - client.Close now cancels existing requests and should be much faster for
+ large amounts of sessions.
+ - Correctly allow MinOpened sessions to be spun up.
+
+## v0.34.0
+
+- functions/metadata:
+ - Switch to using JSON in context.
+ - Make Resource a value.
+- vision: Fix ProductSearch return type.
+- datastore: Add an example for how to handle MultiError.
+
+## v0.33.1
+
+- compute: Removes an erroneously added go.mod.
+- logging: Populate source location in fromLogEntry.
+
+## v0.33.0
+
+- bttest:
+ - Add support for apply_label_transformer.
+- expr:
+ - Add expr library.
+- firestore:
+ - Support retrieval of missing documents.
+- kms:
+ - Add IAM methods.
+- pubsub:
+ - Clarify extension documentation.
+- scheduler:
+ - Add v1beta1 client.
+- vision:
+ - Add product search helper.
+ - Add new product search client.
+
+## v0.32.0
+
+Note: This release is the last to support Go 1.6 and 1.8.
+
+- bigquery:
+ - Add support for removing an expiration.
+ - Ignore NeverExpire in Table.Create.
+ - Validate table expiration time.
+- cbt:
+ - Add note about not supporting arbitrary bytes.
+- datastore:
+ - Align key checks.
+- firestore:
+ - Return an error when using Start/End without providing values.
+- pubsub:
+ - Add pstest Close method.
+ - Clarify MaxExtension documentation.
+- securitycenter:
+ - Add v1beta1 client.
+- spanner:
+ - Allow nil in mutations.
+ - Improve doc of SessionPoolConfig.MaxOpened.
+ - Increase session deletion timeout from 5s to 15s.
+
+## v0.31.0
+
+- bigtable:
+ - Group mutations across multiple requests.
+- bigquery:
+ - Link to bigquery troubleshooting errors page in bigquery.Error comment.
+- cbt:
+ - Fix go generate command.
+ - Document usage of both maxage + maxversions.
+- datastore:
+ - Passing nil keys results in ErrInvalidKey.
+- firestore:
+ - Clarify what Document.DataTo does with untouched struct fields.
+- profile:
+ - Validate service name in agent.
+- pubsub:
+ - Fix deadlock with pstest and ctx.Cancel.
+ - Fix a possible deadlock in pstest.
+- trace:
+ - Update doc URL with new fragment.
+
+Special thanks to @fastest963 for going above and beyond helping us to debug
+hard-to-reproduce Pub/Sub issues.
+
+## v0.30.0
+
+- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information.
+- bigtable: bttest supports row sample filter.
+- functions: metadata package added for accessing Cloud Functions resource metadata.
+
+## v0.29.0
+
+- bigtable:
+ - Add retry to all idempotent RPCs.
+ - cbt supports complex GC policies.
+ - Emulator supports arbitrary bytes in regex filters.
+- firestore: Add ArrayUnion and ArrayRemove.
+- logging: Add the ContextFunc option to supply the context used for
+ asynchronous RPCs.
+- profiler: Ignore NotDefinedError when fetching the instance name
+- pubsub:
+ - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled.
+ - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning.
+ - Fix deadlock.
+ - Restore Ack/Nack/Modacks metrics.
+ - Improve context handling in iterator.
+ - Implement synchronous mode for Receive.
+ - pstest: add Pull.
+- spanner: Add a metric for the number of sessions currently opened.
+- storage:
+ - Canceling the context releases all resources.
+ - Add additional RetentionPolicy attributes.
+- vision/apiv1: Add LocalizeObjects method.
+
+## v0.28.0
+
+- bigtable:
+ - Emulator returns Unimplemented for snapshot RPCs.
+- bigquery:
+ - Support zero-length repeated, nested fields.
+- cloud assets:
+ - Add v1beta client.
+- datastore:
+ - Don't nil out transaction ID on retry.
+- firestore:
+ - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next
+ returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator
+ (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots.
+ - Add array-contains operator.
+- IAM:
+ - Add iam/credentials/apiv1 client.
+- pubsub:
+ - Canceling the context passed to Subscription.Receive causes Receive to return when
+ processing finishes on all messages currently in progress, even if new messages are arriving.
+- redis:
+ - Add redis/apiv1 client.
+- storage:
+ - Add Reader.Attrs.
+ - Deprecate several Reader getter methods: please use Reader.Attrs for these instead.
+ - Add ObjectHandle.Bucket and ObjectHandle.Object methods.
+
+## v0.27.0
+
+- bigquery:
+ - Allow modification of encryption configuration and partitioning options to a table via the Update call.
+ - Add a SchemaFromJSON function that converts a JSON table schema.
+- bigtable:
+ - Restore cbt count functionality.
+- containeranalysis:
+ - Add v1beta client.
+- spanner:
+ - Fix a case where an iterator might not be closed correctly.
+- storage:
+ - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount.
+ - Add a method to Reader that returns the parsed value of the Last-Modified header.
+
+## v0.26.0
+
+- bigquery:
+ - Support filtering listed jobs by min/max creation time.
+ - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering).
+ - Include job creator email in Job struct.
+- bigtable:
+ - Add `RowSampleFilter`.
+ - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters
+ must match the entire target string to succeed. Previously, the emulator was
+ succeeding on partial matches.
+ NOTE: As of this release, this change only affects the emulator when run
+ from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched
+ from `gcloud` will be updated in a subsequent `gcloud` release.
+- dataproc: Add apiv1beta2 client.
+- datastore: Save non-nil pointer fields on omitempty.
+- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header.
+- logging/logadmin: Support writer_identity and include_children.
+- pubsub:
+ - Support labels on topics and subscriptions.
+ - Support message storage policy for topics.
+ - Use the distribution of ack times to determine when to extend ack deadlines.
+ The only user-visible effect of this change should be that programs that
+ call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub
+ Subscriber`.
+- storage:
+ - Support predefined ACLs.
+ - Support additional ACL fields other than Entity and Role.
+ - Support bucket websites.
+ - Support bucket logging.
+
+
+## v0.25.0
+
+- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md)
+- bigtable:
+ - cbt: Support a GC policy of "never".
+- errorreporting:
+ - Support User.
+ - Close now calls Flush.
+ - Use OnError (previously ignored).
+ - Pass through the RPC error as-is to OnError.
+- httpreplay: A tool for recording and replaying HTTP requests
+ (for the bigquery and storage clients in this repo).
+- kms: v1 client added
+- logging: add SourceLocation to Entry.
+- storage: improve CRC checking on read.
+
+## v0.24.0
+
+- bigquery: Support for the NUMERIC type.
+- bigtable:
+ - cbt: Optionally specify columns for read/lookup
+ - Support instance-level administration.
+- oslogin: New client for the OS Login API.
+- pubsub:
+ - The package is now stable. There will be no further breaking changes.
+ - Internal changes to improve Subscription.Receive behavior.
+- storage: Support updating bucket lifecycle config.
+- spanner: Support struct-typed parameter bindings.
+- texttospeech: New client for the Text-to-Speech API.
+
+## v0.23.0
+
+- bigquery: Add DDL stats to query statistics.
+- bigtable:
+ - cbt: Add cells-per-column limit for row lookup.
+ - cbt: Make it possible to combine read filters.
+- dlp: v2beta2 client removed. Use the v2 client instead.
+- firestore, spanner: Fix compilation errors due to protobuf changes.
+
+## v0.22.0
+
+- bigtable:
+ - cbt: Support cells per column limit for row read.
+ - bttest: Correctly handle empty RowSet.
+ - Fix ReadModifyWrite operation in emulator.
+ - Fix API path in GetCluster.
+
+- bigquery:
+ - BEHAVIOR CHANGE: Retry on 503 status code.
+ - Add dataset.DeleteWithContents.
+ - Add SchemaUpdateOptions for query jobs.
+ - Add Timeline to QueryStatistics.
+ - Add more stats to ExplainQueryStage.
+ - Support Parquet data format.
+
+- datastore:
+ - Support omitempty for times.
+
+- dlp:
+ - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
+ which is now out of beta.
+ - Add v2 client.
+
+- firestore:
+ - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
+
+- iam:
+ - Support JWT signing via SignJwt callopt.
+
+- profiler:
+ - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
+ - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
+ - Avoid returning empty serial port output.
+
+- pubsub:
+ - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
+ - BEHAVIOR CHANGE: Don't backoff on EOF.
+ - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
+
+- redis:
+ - Add v1 beta Redis client.
+
+- spanner:
+ - Support SessionLabels.
+
+- speech:
+ - Add api v1 beta1 client.
+
+- storage:
+ - BEHAVIOR CHANGE: Retry reads when retryable error occurs.
+ - Fix delete of object in requester-pays bucket.
+ - Support KMS integration.
+
+## v0.21.0
+
+- bigquery:
+ - Add OpenCensus tracing.
+
+- firestore:
+ - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
+ whose Exists method returns false. DocumentRef.Get and Transaction.Get
+ return the non-nil DocumentSnapshot in addition to a NotFound error.
+ **DocumentRef.GetAll and Transaction.GetAll return a non-nil
+ DocumentSnapshot instead of nil.**
+ - Add DocumentIterator.Stop. **Call Stop whenever you are done with a
+ DocumentIterator.**
+ - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
+ notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
+ - Canceling an RPC now always returns a grpc.Status with codes.Canceled.
+
+- spanner:
+ - Add `CommitTimestamp`, which supports inserting the commit timestamp of a
+ transaction into a column.
+
+## v0.20.0
+
+- bigquery: Support SchemaUpdateOptions for load jobs.
+
+- bigtable:
+ - Add SampleRowKeys.
+ - cbt: Support union, intersection GCPolicy.
+ - Retry admin RPCS.
+ - Add trace spans to retries.
+
+- datastore: Add OpenCensus tracing.
+
+- firestore:
+ - Fix queries involving Null and NaN.
+ - Allow Timestamp protobuffers for time values.
+
+- logging: Add a WriteTimeout option.
+
+- spanner: Support Batch API.
+
+- storage: Add OpenCensus tracing.
+
+## v0.19.0
+
+- bigquery:
+ - Support customer-managed encryption keys.
+
+- bigtable:
+ - Improved emulator support.
+ - Support GetCluster.
+
+- datastore:
+ - Add general mutations.
+ - Support pointer struct fields.
+ - Support transaction options.
+
+- firestore:
+ - Add Transaction.GetAll.
+ - Support document cursors.
+
+- logging:
+ - Support concurrent RPCs to the service.
+ - Support per-entry resources.
+
+- profiler:
+ - Add config options to disable heap and thread profiling.
+ - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
+
+- pubsub:
+ - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
+ callback returns).
+ - Add SubscriptionInProject.
+ - Add OpenCensus instrumentation for streaming pull.
+
+- storage:
+ - Support CORS.
+
+## v0.18.0
+
+- bigquery:
+ - Marked stable.
+ - Schema inference of nullable fields supported.
+ - Added TimePartitioning to QueryConfig.
+
+- firestore: Data provided to DocumentRef.Set with a Merge option can contain
+ Delete sentinels.
+
+- logging: Clients can accept parent resources other than projects.
+
+- pubsub:
+ - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome.
+ - Support updating more subscription metadata: AckDeadline,
+ RetainAckedMessages and RetentionDuration.
+
+- oslogin/apiv1beta: New client for the Cloud OS Login API.
+
+- rpcreplay: A package for recording and replaying gRPC traffic.
+
+- spanner:
+ - Add a ReadWithOptions that supports a row limit, as well as an index.
+ - Support query plan and execution statistics.
+ - Added [OpenCensus](http://opencensus.io) support.
+
+- storage: Clarify checksum validation for gzipped files (it is not validated
+ when the file is served uncompressed).
+
+
+## v0.17.0
+
+- firestore BREAKING CHANGES:
+ - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
+ Change
+ `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
+ to
+ `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
+
+ Change
+ `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
+ to
+ `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
+ - Rename MergePaths to Merge; require args to be FieldPaths
+ - A value stored as an integer can be read into a floating-point field, and vice versa.
+- bigtable/cmd/cbt:
+ - Support deleting a column.
+ - Add regex option for row read.
+- spanner: Mark stable.
+- storage:
+ - Add Reader.ContentEncoding method.
+ - Fix handling of SignedURL headers.
+- bigquery:
+ - If Uploader.Put is called with no rows, it returns nil without making a
+ call.
+ - Schema inference supports the "nullable" option in struct tags for
+ non-required fields.
+ - TimePartitioning supports "Field".
+
+
+## v0.16.0
+
+- Other bigquery changes:
+ - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
+ - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
+ Legacy SQL.
+ - Uploader.Put will generate a random insert ID if you do not provide one.
+ - Support time partitioning for load jobs.
+ - Support dry-run queries.
+ - A `Job` remembers its last retrieved status.
+ - Support retrieving job configuration.
+ - Support labels for jobs and tables.
+ - Support dataset access lists.
+ - Improve support for external data sources, including data from Bigtable and
+ Google Sheets, and tables with external data.
+ - Support updating a table's view configuration.
+ - Fix uploading civil times with nanoseconds.
+
+- storage:
+ - Support PubSub notifications.
+ - Support Requester Pays buckets.
+
+- profiler: Support goroutine and mutex profile types.
+
+## v0.15.0
+
+- firestore: beta release. See the
+ [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
+
+- errorreporting: The existing package has been redesigned.
+
+- errors: This package has been removed. Use errorreporting.
+
+
+## v0.14.0
+
+- bigquery BREAKING CHANGES:
+ - Standard SQL is the default for queries and views.
+ - `Table.Create` takes `TableMetadata` as a second argument, instead of
+ options.
+ - `Dataset.Create` takes `DatasetMetadata` as a second argument.
+ - `DatasetMetadata` field `ID` renamed to `FullID`
+ - `TableMetadata` field `ID` renamed to `FullID`
+
+- Other bigquery changes:
+ - The client will append a random suffix to a provided job ID if you set
+ `AddJobIDSuffix` to true in a job config.
+ - Listing jobs is supported.
+ - Better retry logic.
+
+- vision, language, speech: clients are now stable
+
+- monitoring: client is now beta
+
+- profiler:
+ - Rename InstanceName to Instance, ZoneName to Zone
+ - Auto-detect service name and version on AppEngine.
+
+## v0.13.0
+
+- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
+ options to continue using Legacy SQL after the client switches its default
+ to Standard SQL.
+
+- bigquery: Support for updating dataset labels.
+
+- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
+ than the client's. DatasetsInProject is no longer needed and is deprecated.
+
+- bigtable: Fail ListInstances when any zones fail.
+
+- spanner: support decoding of slices of basic types (e.g. []string, []int64,
+ etc.)
+
+- logging/logadmin: UpdateSink no longer creates a sink if it is missing
+ (actually a change to the underlying service, not the client)
+
+- profiler: Service and ServiceVersion replace Target in Config.
+
+## v0.12.0
+
+- pubsub: Subscription.Receive now uses streaming pull.
+
+- pubsub: add Client.TopicInProject to access topics in a different project
+ than the client.
+
+- errors: renamed errorreporting. The errors package will be removed shortly.
+
+- datastore: improved retry behavior.
+
+- bigquery: support updates to dataset metadata, with etags.
+
+- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
+
+- bigquery: generate all job IDs on the client.
+
+- storage: support bucket lifecycle configurations.
+
+
+## v0.11.0
+
+- Clients for spanner, pubsub and video are now in beta.
+
+- New client for DLP.
+
+- spanner: performance and testing improvements.
+
+- storage: requester-pays buckets are supported.
+
+- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
+
+- pubsub: bug fixes and other minor improvements
+
+## v0.10.0
+
+- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
+
+- pubsub: Subscription.Receive now runs concurrently for higher throughput.
+
+- vision: cloud.google.com/go/vision is deprecated. Use
+cloud.google.com/go/vision/apiv1 instead.
+
+- translation: now stable.
+
+- trace: several changes to the surface. See the link below.
+
+### Code changes required from v0.9.0
+
+- pubsub: Replace
+
+ ```
+ sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
+ ```
+
+ with
+
+ ```
+ sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
+ PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
+ })
+ ```
+
+- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
+Given an initialized `*trace.Client` named `tc`, instead of
+
+ ```
+ s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
+ ```
+
+ write
+
+ ```
+ s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
+ ```
+
+- trace trace.GRPCClientInterceptor will also provided from *trace.Client.
+Instead of
+
+ ```
+ conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
+ ```
+
+ write
+
+ ```
+ conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
+ ```
+
+- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
+interceptor as a dial option as shown below when initializing Cloud package
+clients:
+
+ ```
+ c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
+ if err != nil {
+ ...
+ }
+ ```
+
+
+## v0.9.0
+
+- Breaking changes to some autogenerated clients.
+- rpcreplay package added.
+
+## v0.8.0
+
+- profiler package added.
+- storage:
+ - Retry Objects.Insert call.
+ - Add ProgressFunc to WRiter.
+- pubsub: breaking changes:
+ - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
+ - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
+ - Message.Done replaced with Message.Ack and Message.Nack.
+
+## v0.7.0
+
+- Release of a client library for Spanner. See
+the
+[blog
+post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
+Note that although the Spanner service is beta, the Go client library is alpha.
+
+## v0.6.0
+
+- Beta release of BigQuery, DataStore, Logging and Storage. See the
+[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
+
+- bigquery:
+ - struct support. Read a row directly into a struct with
+`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
+You can also use field tags. See the [package documentation][cloud-bigquery-ref]
+for details.
+
+ - The `ValueList` type was removed. It is no longer necessary. Instead of
+ ```go
+ var v ValueList
+ ... it.Next(&v) ..
+ ```
+ use
+
+ ```go
+ var v []Value
+ ... it.Next(&v) ...
+ ```
+
+ - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
+ `ValueList` would append to the slice. Now each call resets the size to zero first.
+
+ - Schema inference will infer the SQL type BYTES for a struct field of
+ type []byte. Previously it inferred STRING.
+
+ - The types `uint`, `uint64` and `uintptr` are no longer supported in schema
+ inference. BigQuery's integer type is INT64, and those types may hold values
+ that are not correctly represented in a 64-bit signed integer.
+
+## v0.5.0
+
+- bigquery:
+ - The SQL types DATE, TIME and DATETIME are now supported. They correspond to
+ the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
+ package.
+ - Support for query parameters.
+ - Support deleting a dataset.
+ - Values from INTEGER columns will now be returned as int64, not int. This
+ will avoid errors arising from large values on 32-bit systems.
+- datastore:
+ - Nested Go structs encoded as Entity values, instead of a
+flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg.
+ ```go
+ type State struct {
+ Cities []struct{
+ Populations []int
+ }
+ }
+ ```
+ See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
+more details.
+ - Contexts no longer hold namespaces; instead you must set a key's namespace
+ explicitly. Also, key functions have been changed and renamed.
+ - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
+ ```go
+ q := datastore.NewQuery("Kind").Namespace("ns")
+ ```
+ - All the fields of Key are exported. That means you can construct any Key with a struct literal:
+ ```go
+ k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
+ ```
+ - As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed.
+ - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
+ ```go
+ NewIncompleteKey(ctx, kind, parent)
+ ```
+ with
+ ```go
+ IncompleteKey(kind, parent)
+ ```
+ and if you do use namespaces, make sure you set the namespace on the returned key.
+ - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
+ ```go
+ NewKey(ctx, kind, name, 0, parent)
+ NewKey(ctx, kind, "", id, parent)
+ ```
+ with
+ ```go
+ NameKey(kind, name, parent)
+ IDKey(kind, id, parent)
+ ```
+ and if you do use namespaces, make sure you set the namespace on the returned key.
+ - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
+ - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
+ - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
+more details.
+
+## v0.4.0
+
+- bigquery:
+ -`NewGCSReference` is now a function, not a method on `Client`.
+ - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
+ loading data into a table from a file or any `io.Reader`.
+ * Client.Table and Client.OpenTable have been removed.
+ Replace
+ ```go
+ client.OpenTable("project", "dataset", "table")
+ ```
+ with
+ ```go
+ client.DatasetInProject("project", "dataset").Table("table")
+ ```
+
+ * Client.CreateTable has been removed.
+ Replace
+ ```go
+ client.CreateTable(ctx, "project", "dataset", "table")
+ ```
+ with
+ ```go
+ client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
+ ```
+
+ * Dataset.ListTables have been replaced with Dataset.Tables.
+ Replace
+ ```go
+ tables, err := ds.ListTables(ctx)
+ ```
+ with
+ ```go
+ it := ds.Tables(ctx)
+ for {
+ table, err := it.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: use table.
+ }
+ ```
+
+ * Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
+ Replace
+ ```go
+ it, err := client.Read(ctx, job)
+ ```
+ with
+ ```go
+ it, err := job.Read(ctx)
+ ```
+ and similarly for reading from tables or queries.
+
+ * The iterator returned from the Read methods is now named RowIterator. Its
+ behavior is closer to the other iterators in these libraries. It no longer
+ supports the Schema method; see the next item.
+ Replace
+ ```go
+ for it.Next(ctx) {
+ var vals ValueList
+ if err := it.Get(&vals); err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: use vals.
+ }
+ if err := it.Err(); err != nil {
+ // TODO: Handle error.
+ }
+ ```
+ with
+ ```
+ for {
+ var vals ValueList
+ err := it.Next(&vals)
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // TODO: use vals.
+ }
+ ```
+ Instead of the `RecordsPerRequest(n)` option, write
+ ```go
+ it.PageInfo().MaxSize = n
+ ```
+ Instead of the `StartIndex(i)` option, write
+ ```go
+ it.StartIndex = i
+ ```
+
+ * ValueLoader.Load now takes a Schema in addition to a slice of Values.
+ Replace
+ ```go
+ func (vl *myValueLoader) Load(v []bigquery.Value)
+ ```
+ with
+ ```go
+ func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
+ ```
+
+
+ * Table.Patch is replace by Table.Update.
+ Replace
+ ```go
+ p := table.Patch()
+ p.Description("new description")
+ metadata, err := p.Apply(ctx)
+ ```
+ with
+ ```go
+ metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
+ Description: "new description",
+ })
+ ```
+
+ * Client.Copy is replaced by separate methods for each of its four functions.
+ All options have been replaced by struct fields.
+
+ * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
+
+ Replace
+ ```go
+ client.Copy(ctx, table, gcsRef)
+ ```
+ with
+ ```go
+ table.LoaderFrom(gcsRef).Run(ctx)
+ ```
+ Instead of passing options to Copy, set fields on the Loader:
+ ```go
+ loader := table.LoaderFrom(gcsRef)
+ loader.WriteDisposition = bigquery.WriteTruncate
+ ```
+
+ * To extract data from a table into Google Cloud Storage, use
+ Table.ExtractorTo. Set fields on the returned Extractor instead of
+ passing options.
+
+ Replace
+ ```go
+ client.Copy(ctx, gcsRef, table)
+ ```
+ with
+ ```go
+ table.ExtractorTo(gcsRef).Run(ctx)
+ ```
+
+ * To copy data into a table from one or more other tables, use
+ Table.CopierFrom. Set fields on the returned Copier instead of passing options.
+
+ Replace
+ ```go
+ client.Copy(ctx, dstTable, srcTable)
+ ```
+ with
+ ```go
+ dst.Table.CopierFrom(srcTable).Run(ctx)
+ ```
+
+ * To start a query job, create a Query and call its Run method. Set fields
+ on the query instead of passing options.
+
+ Replace
+ ```go
+ client.Copy(ctx, table, query)
+ ```
+ with
+ ```go
+ query.Run(ctx)
+ ```
+
+ * Table.NewUploader has been renamed to Table.Uploader. Instead of options,
+ configure an Uploader by setting its fields.
+ Replace
+ ```go
+ u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+ ```
+ with
+ ```go
+ u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+ u.IgnoreUnknownValues = true
+ ```
+
+- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+## v0.3.0
+
+- storage:
+ * AdminClient replaced by methods on Client.
+ Replace
+ ```go
+ adminClient.CreateBucket(ctx, bucketName, attrs)
+ ```
+ with
+ ```go
+ client.Bucket(bucketName).Create(ctx, projectID, attrs)
+ ```
+
+ * BucketHandle.List replaced by BucketHandle.Objects.
+ Replace
+ ```go
+ for query != nil {
+ objs, err := bucket.List(d.ctx, query)
+ if err != nil { ... }
+ query = objs.Next
+ for _, obj := range objs.Results {
+ fmt.Println(obj)
+ }
+ }
+ ```
+ with
+ ```go
+ iter := bucket.Objects(d.ctx, query)
+ for {
+ obj, err := iter.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil { ... }
+ fmt.Println(obj)
+ }
+ ```
+ (The `iterator` package is at `google.golang.org/api/iterator`.)
+
+ Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
+
+ Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
+
+
+ * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
+ Replace
+ ```go
+ attrs, err := src.CopyTo(ctx, dst, nil)
+ ```
+ with
+ ```go
+ attrs, err := dst.CopierFrom(src).Run(ctx)
+ ```
+
+ Replace
+ ```go
+ attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"})
+ ```
+ with
+ ```go
+ c := dst.CopierFrom(src)
+ c.ContextType = "text/html"
+ attrs, err := c.Run(ctx)
+ ```
+
+ * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
+ Replace
+ ```go
+ attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
+ ```
+ with
+ ```go
+ attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
+ ```
+
+ * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
+ Replace
+ ```go
+ attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"})
+ ```
+ with
+ ```go
+ attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"})
+ ```
+
+ * ObjectHandle.WithConditions replaced by ObjectHandle.If.
+ Replace
+ ```go
+ obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
+ ```
+ with
+ ```go
+ obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
+ ```
+
+ Replace
+ ```go
+ obj.WithConditions(storage.IfGenerationMatch(0))
+ ```
+ with
+ ```go
+ obj.If(storage.Conditions{DoesNotExist: true})
+ ```
+
+ * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
+
+- Package preview/logging deleted. Use logging instead.
+
+## v0.2.0
+
+- Logging client replaced with preview version (see below).
+
+- New clients for some of Google's Machine Learning APIs: Vision, Speech, and
+Natural Language.
+
+- Preview version of a new [Stackdriver Logging][cloud-logging] client in
+[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
+This client uses gRPC as its transport layer, and supports log reading, sinks
+and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
diff --git a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..8fd1bc9c2
--- /dev/null
+++ b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md
new file mode 100644
index 000000000..36d1b275e
--- /dev/null
+++ b/vendor/cloud.google.com/go/CONTRIBUTING.md
@@ -0,0 +1,364 @@
+# Contributing
+
+1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
+ The issue will be used to discuss the bug or feature and should be created
+ before sending a PR.
+
+1. [Install Go](https://golang.org/dl/).
+ 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
+ is in your `PATH`.
+ 1. Check it's working by running `go version`.
+ * If it doesn't work, check the install location, usually
+ `/usr/local/go`, is on your `PATH`.
+
+1. Sign one of the
+[contributor license agreements](#contributor-license-agreements) below.
+
+1. Clone the repo:
+ `git clone https://github.com/googleapis/google-cloud-go`
+
+1. Change into the checked out source:
+ `cd google-cloud-go`
+
+1. Fork the repo.
+
+1. Set your fork as a remote:
+ `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
+
+1. Make changes, commit to your fork.
+
+ Commit messages should follow the
+ [Conventional Commits Style](https://www.conventionalcommits.org). The scope
+ portion should always be filled with the name of the package affected by the
+ changes being made. For example:
+ ```
+ feat(functions): add gophers codelab
+ ```
+
+1. Send a pull request with your changes.
+
+ To minimize friction, consider setting `Allow edits from maintainers` on the
+ PR, which will enable project committers and automation to update your PR.
+
+1. A maintainer will review the pull request and make comments.
+
+ Prefer adding additional commits over amending and force-pushing since it can
+ be difficult to follow code reviews when the commit history changes.
+
+ Commits will be squashed when they're merged.
+
+## Policy on new dependencies
+
+While the Go ecosystem is rich with useful modules, in this project we try to
+minimize the number of direct dependencies we have on modules that are not
+Google-owned.
+
+Adding new third party dependencies can have the following effects:
+* broadens the vulnerability surface
+* increases so called "vanity" import routing infrastructure failure points
+* increases complexity of our own [`third_party`][] imports
+
+So if you are contributing, please either contribute the full implementation
+directly, or find a Google-owned project that provides the functionality. Of
+course, there may be exceptions to this rule, but those should be well defined
+and agreed upon by the maintainers ahead of time.
+
+## Testing
+
+We test code against two versions of Go, the minimum and maximum versions
+supported by our clients. To see which versions these are checkout our
+[README](README.md#supported-versions).
+
+### Integration Tests
+
+In addition to the unit tests, you may run the integration test suite. These
+directions describe setting up your environment to run integration tests for
+_all_ packages: note that many of these instructions may be redundant if you
+intend only to run integration tests on a single package.
+
+#### GCP Setup
+
+To run the integrations tests, creation and configuration of three projects in
+the Google Developers Console is required: one specifically for Firestore
+integration tests, one specifically for Bigtable integration tests, and another
+for all other integration tests. We'll refer to these projects as
+"Firestore project", "Bigtable project" and "general project".
+
+Note: You can skip setting up Bigtable project if you do not plan working on or running a few Bigtable
+tests that require a secondary project
+
+After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
+for each project. Ensure the project-level **Owner**
+[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
+each service account. During the creation of the service account, you should
+download the JSON credential file for use later.
+
+Next, ensure the following APIs are enabled in the general project:
+
+- BigQuery API
+- BigQuery Data Transfer API
+- Cloud Dataproc API
+- Cloud Dataproc Control API Private
+- Cloud Datastore API
+- Cloud Firestore API
+- Cloud Key Management Service (KMS) API
+- Cloud Natural Language API
+- Cloud OS Login API
+- Cloud Pub/Sub API
+- Cloud Resource Manager API
+- Cloud Spanner API
+- Cloud Speech API
+- Cloud Translation API
+- Cloud Video Intelligence API
+- Cloud Vision API
+- Compute Engine API
+- Compute Engine Instance Group Manager API
+- Container Registry API
+- Firebase Rules API
+- Google Cloud APIs
+- Google Cloud Deployment Manager V2 API
+- Google Cloud SQL
+- Google Cloud Storage
+- Google Cloud Storage JSON API
+- Google Compute Engine Instance Group Updater API
+- Google Compute Engine Instance Groups API
+- Kubernetes Engine API
+- Cloud Error Reporting API
+- Pub/Sub Lite API
+
+Next, create a Datastore database in the general project, and a Firestore
+database in the Firestore project.
+
+Finally, in the general project, create an API key for the translate API:
+
+- Go to GCP Developer Console.
+- Navigate to APIs & Services > Credentials.
+- Click Create Credentials > API Key.
+- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
+
+#### Local Setup
+
+Once the three projects are created and configured, set the following environment
+variables:
+
+- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
+bamboo-shift-455) for the general project.
+- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
+project's service account.
+- `GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES`: Comma separated list of developer's Datastore databases. If not provided, default database i.e. empty string is used.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
+(e.g. doorway-cliff-677) for the Firestore project.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES` : Comma separated list of developer's Firestore databases. If not provided, default database is used.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
+Firestore project's service account.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
+- `GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID`: Developers Console project's ID (e.g. doorway-cliff-677) for Bigtable optional secondary project. This can be same as Firestore project or any project other than the general project.
+- `GCLOUD_TESTS_BIGTABLE_CLUSTER`: Cluster ID of Bigtable cluster in general project
+- `GCLOUD_TESTS_BIGTABLE_PRI_PROJ_SEC_CLUSTER`: Optional. Cluster ID of Bigtable secondary cluster in general project
+
+As part of the setup that follows, the following variables will be configured:
+
+- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
+in the form
+"projects/P/locations/L/keyRings/R". The creation of this is described below.
+- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
+in the form
+"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
+- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
+
+Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
+create some resources used in integration tests.
+
+From the project's root directory:
+
+``` sh
+# Sets the default project in your env.
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticates the gcloud tool with your account.
+$ gcloud auth login
+
+# Create the indexes for all the databases you want to use in the datastore integration tests.
+# Use empty string as databaseID or skip database flag for default database.
+$ gcloud alpha datastore indexes create --database=your-databaseID-1 --project=$GCLOUD_TESTS_GOLANG_PROJECT_ID testdata/index.yaml
+
+# Creates a Google Cloud storage bucket with the same name as your test project,
+# and with the Cloud Logging service account as owner, for the sink
+# integration tests in logging.
+$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Creates a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
+# "service-@gs-project-accounts.iam.gserviceaccount.com"
+# as a publisher to that topic.
+
+# Creates a Spanner instance for the spanner integration tests.
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
+# NOTE: Spanner instances are priced by the node-hour, so you may want to
+# delete the instance after testing with 'gcloud beta spanner instances delete'.
+
+$ export MY_KEYRING=some-keyring-name
+$ export MY_LOCATION=global
+$ export MY_SINGLE_LOCATION=us-central1
+# Creates a KMS keyring, in the same location as the default location for your
+# project's buckets.
+$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
+# Creates two keys in the keyring, named key1 and key2.
+$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
+# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
+$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
+# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
+$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
+
+# Create KMS Key in one region for Bigtable
+$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
+$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
+# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
+$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
+# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
+$ gcloud beta services identity create \
+ --service=bigtableadmin.googleapis.com \
+ --project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+# Note the service agent email for the agent created.
+$ export SERVICE_AGENT_EMAIL=
+
+# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
+$ gcloud kms keys add-iam-policy-binding key1 \
+ --keyring $MY_KEYRING \
+ --location $MY_SINGLE_LOCATION \
+ --role roles/cloudkms.cryptoKeyEncrypterDecrypter \
+ --member "serviceAccount:$SERVICE_AGENT_EMAIL" \
+ --project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+```
+
+It may be useful to add exports to your shell initialization for future use.
+For instance, in `.zshrc`:
+
+```sh
+#### START GO SDK Test Variables
+# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
+export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
+
+# Developers Console project's ID (e.g. bamboo-shift-455) for the Bigtable project.
+export GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID=your-bigtable-optional-secondary-project
+
+# The path to the JSON key file of the general project's service account.
+export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
+
+# Comma separated list of developer's Datastore databases. If not provided,
+# default database i.e. empty string is used.
+export GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES=your-database-1,your-database-2
+
+# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
+export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
+
+# Comma separated list of developer's Firestore databases. If not provided, default database is used.
+export GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES=your-database-1,your-database-2
+
+# The path to the JSON key file of the Firestore project's service account.
+export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
+
+# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
+# The creation of this is described below.
+export MY_KEYRING=my-golang-sdk-test
+export MY_LOCATION=global
+export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
+
+# API key for using the Translate API.
+export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
+
+# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
+export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
+#### END GO SDK Test Variables
+```
+
+#### Running
+
+Once you've done the necessary setup, you can run the integration tests by
+running:
+
+``` sh
+$ go test -v ./...
+```
+
+Note that the above command will not run the tests in other modules. To run
+tests on other modules, first navigate to the appropriate
+subdirectory. For instance, to run only the tests for datastore:
+``` sh
+$ cd datastore
+$ go test -v ./...
+```
+
+#### Replay
+
+Some packages can record the RPCs during integration tests to a file for
+subsequent replay. To record, pass the `-record` flag to `go test`. The
+recording will be saved to the _package_`.replay` file. To replay integration
+tests from a saved recording, the replay file must be present, the `-short`
+flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
+environment variable must have a non-empty value.
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your
+work**, then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
+available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
+
+[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
+[`third_party`]: https://opensource.google/documentation/reference/thirdparty
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
new file mode 100644
index 000000000..8f149c8a4
--- /dev/null
+++ b/vendor/cloud.google.com/go/README.md
@@ -0,0 +1,78 @@
+# Google Cloud Client Libraries for Go
+
+[](https://pkg.go.dev/cloud.google.com/go)
+
+Go packages for [Google Cloud Platform](https://cloud.google.com) services.
+
+## Installation
+
+```bash
+go get cloud.google.com/go/firestore@latest # Replace firestore with the package you want to use.
+```
+
+**NOTE:** Some of these packages are under development, and may occasionally
+make backwards-incompatible changes.
+
+## Supported APIs
+
+For an updated list of all of our released APIs please see our
+[reference docs](https://cloud.google.com/go/docs/reference).
+
+## [Go Versions Supported](#supported-versions)
+
+Our libraries are compatible with the two most recent major Go
+releases, the same [policy](https://go.dev/doc/devel/release#policy) the Go
+programming language follows. This means the currently supported versions are:
+
+- Go 1.23
+- Go 1.24
+
+## Authorization
+
+By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
+for authorization credentials used in calling the API endpoints. This will allow your
+application to run in many environments without requiring explicit configuration.
+
+```go
+client, err := storage.NewClient(ctx)
+```
+
+To authorize using a
+[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
+pass
+[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
+to the `NewClient` function of the desired package. For example:
+
+```go
+client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
+```
+
+You can exert more control over authorization by using the
+[credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) package to
+create an [auth.Credentials](https://pkg.go.dev/cloud.google.com/go/auth#Credentials).
+Then pass [`option.WithAuthCredentials`](https://pkg.go.dev/google.golang.org/api/option#WithAuthCredentials)
+to the `NewClient` function:
+
+```go
+creds := ...
+client, err := storage.NewClient(ctx, option.WithAuthCredentials(creds))
+```
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+## Links
+
+- [Go on Google Cloud](https://cloud.google.com/go/home)
+- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
+- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
+- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
+- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md
new file mode 100644
index 000000000..6d0fcf4f9
--- /dev/null
+++ b/vendor/cloud.google.com/go/RELEASING.md
@@ -0,0 +1,141 @@
+# Releasing
+
+## Determine which module to release
+
+The Go client libraries have several modules. Each module does not strictly
+correspond to a single library - they correspond to trees of directories. If a
+file needs to be released, you must release the closest ancestor module.
+
+To see all modules:
+
+```bash
+$ cat `find . -name go.mod` | grep module
+module cloud.google.com/go/pubsub
+module cloud.google.com/go/spanner
+module cloud.google.com/go
+module cloud.google.com/go/bigtable
+module cloud.google.com/go/bigquery
+module cloud.google.com/go/storage
+module cloud.google.com/go/pubsublite
+module cloud.google.com/go/firestore
+module cloud.google.com/go/logging
+module cloud.google.com/go/internal/gapicgen
+module cloud.google.com/go/internal/godocfx
+module cloud.google.com/go/internal/examples/fake
+module cloud.google.com/go/internal/examples/mock
+module cloud.google.com/go/datastore
+```
+
+The `cloud.google.com/go` is the repository root module. Each other module is
+a submodule.
+
+So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
+ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
+version of the `cloud.google.com/go/bigtable` submodule.
+
+If you need to release a change in `asset/apiv1/asset_client.go`, the closest
+ancestor module is `cloud.google.com/go` - so you should release a new version
+of the `cloud.google.com/go` repository root module. Note: releasing
+`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
+They are released entirely independently.
+
+## Test failures
+
+If there are any test failures in the Kokoro build, releases are blocked until
+the failures have been resolved.
+
+## How to release
+
+### Automated Releases (`cloud.google.com/go` and submodules)
+
+We now use [release-please](https://github.com/googleapis/release-please) to
+perform automated releases for `cloud.google.com/go` and all submodules.
+
+1. If there are changes that have not yet been released, a
+ [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
+ be automatically opened by release-please
+ with a title like "chore: release X.Y.Z" (for the root module) or
+ "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
+ is the next version to be released. Find the desired pull request
+ [here](https://github.com/googleapis/google-cloud-go/pulls)
+1. Check for failures in the
+ [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+ any failures in the most recent build, address them before proceeding with
+ the release. (This applies even if the failures are in a different submodule
+ from the one being released.)
+1. Review the release notes. These are automatically generated from the titles
+ of any merged commits since the previous release. If you would like to edit
+ them, this can be done by updating the changes in the release PR.
+1. To cut a release, approve and merge the pull request. Doing so will
+ update the `CHANGES.md`, tag the merged commit with the appropriate version,
+ and draft a GitHub release which will copy the notes from `CHANGES.md`.
+
+### Manual Release (`cloud.google.com/go`)
+
+If for whatever reason the automated release process is not working as expected,
+here is how to manually cut a release of `cloud.google.com/go`.
+
+1. Check for failures in the
+ [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+ any failures in the most recent build, address them before proceeding with
+ the release.
+1. Navigate to `google-cloud-go/` and switch to main.
+1. `git pull`
+1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
+ The current latest tag `$CV` is the largest tag. It should look something
+ like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
+ specific library, not the module root). We'll call the current version `$CV`
+ and the new version `$NV`.
+1. On main, run `git log $CV...` to list all the changes since the last
+ release. NOTE: You must manually visually parse out changes to submodules [1]
+ (the `git log` is going to show you things in submodules, which are not going
+ to be part of your release).
+1. Edit `CHANGES.md` to include a summary of the changes.
+1. In `internal/version/version.go`, update `const Repo` to today's date with
+ the format `YYYYMMDD`.
+1. In `internal/version` run `go generate`.
+1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
+ and create a PR titled `chore: release $NV`.
+1. Wait for the PR to be reviewed and merged. Once it's merged, and without
+ merging any other PRs in the meantime:
+ a. Switch to main.
+ b. `git pull`
+ c. Tag the repo with the next version: `git tag $NV`.
+ d. Push the tag to origin:
+ `git push origin $NV`
+1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
+ with the new release, copying the contents of `CHANGES.md`.
+
+### Manual Releases (submodules)
+
+If for whatever reason the automated release process is not working as expected,
+here is how to manually cut a release of a submodule.
+
+(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
+
+1. Check for failures in the
+ [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
+ any failures in the most recent build, address them before proceeding with
+ the release. (This applies even if the failures are in a different submodule
+ from the one being released.)
+1. Navigate to `google-cloud-go/` and switch to main.
+1. `git pull`
+1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
+ existing releases. The current latest tag `$CV` is the largest tag. It
+ should look something like `datastore/vX.Y.Z`. We'll call the current version
+ `$CV` and the new version `$NV`.
+1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
+ submodule directory since the last release.
+1. Edit `datastore/CHANGES.md` to include a summary of the changes.
+1. In `internal/version` run `go generate`.
+1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
+ and create a PR titled `chore(datastore): release $NV`.
+1. Wait for the PR to be reviewed and merged. Once it's merged, and without
+ merging any other PRs in the meantime:
+ a. Switch to main.
+ b. `git pull`
+ c. Tag the repo with the next version: `git tag $NV`.
+ d. Push the tag to origin:
+ `git push origin $NV`
+1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
+ with the new release, copying the contents of `datastore/CHANGES.md`.
diff --git a/vendor/cloud.google.com/go/SECURITY.md b/vendor/cloud.google.com/go/SECURITY.md
new file mode 100644
index 000000000..8b58ae9c0
--- /dev/null
+++ b/vendor/cloud.google.com/go/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
+
+The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
+
+We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
new file mode 100644
index 000000000..500c34cf4
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -0,0 +1,396 @@
+# Changelog
+
+## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19)
+
+
+### Features
+
+* **auth:** Add hard-bound token request to compute token provider. ([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699))
+
+## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24)
+
+
+### Documentation
+
+* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941))
+
+## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08)
+
+
+### Features
+
+* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379)
+* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13)
+
+
+### Features
+
+* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d))
+* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f))
+
+
+### Bug Fixes
+
+* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90))
+
+## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10)
+
+
+### Bug Fixes
+
+* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1))
+
+## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04)
+
+
+### Features
+
+* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005)
+
+
+### Bug Fixes
+
+* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188)
+
+## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21)
+
+
+### Features
+
+* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344))
+
+## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12)
+
+
+### Bug Fixes
+
+* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556)
+
+## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06)
+
+
+### Bug Fixes
+
+* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2))
+* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6))
+
+## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30)
+
+
+### Features
+
+* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b))
+
+## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22)
+
+
+### Bug Fixes
+
+* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844)
+* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114))
+
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
+## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
+
+
+### Features
+
+* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45))
+
+## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13)
+
+
+### Bug Fixes
+
+* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638)
+
+## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07)
+
+
+### Features
+
+* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b))
+
+## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8))
+* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22)
+
+
+### Bug Fixes
+
+* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544)
+
+## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09)
+
+
+### Features
+
+* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942))
+
+## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01)
+
+
+### Bug Fixes
+
+* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c))
+* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25)
+
+
+### Features
+
+* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e))
+
+
+### Bug Fixes
+
+* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24)
+
+
+### Bug Fixes
+
+* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414)
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
+
+
+### Bug Fixes
+
+* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
+
+
+### Features
+
+* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
+
+## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
+
+
+### Bug Fixes
+
+* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
+* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
+* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
+* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
+
+## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
+
+
+### Bug Fixes
+
+* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
+
+
+### Features
+
+* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
+
+
+### Bug Fixes
+
+* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
+
+## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
+
+
+### Features
+
+* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
+
+
+### Bug Fixes
+
+* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
+
+
+### Bug Fixes
+
+* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
+* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
+
+## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
+
+
+### Bug Fixes
+
+* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
+
+## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
+
+### Breaking Changes
+
+In the below mentioned commits there were a few large breaking changes since the
+last release of the module.
+
+1. The `Credentials` type has been moved to the root of the module as it is
+ becoming the core abstraction for the whole module.
+2. Because of the above mentioned change many functions that previously
+ returned a `TokenProvider` now return `Credentials`. Similarly, these
+ functions have been renamed to be more specific.
+3. Most places that used to take an optional `TokenProvider` now accept
+ `Credentials`. You can make a `Credentials` from a `TokenProvider` using the
+ constructor found in the `auth` package.
+4. The `detect` package has been renamed to `credentials`. With this change some
+ function signatures were also updated for better readability.
+5. Derivative auth flows like `impersonate` and `downscope` have been moved to
+ be under the new `credentials` package.
+
+Although these changes are disruptive we think that they are for the best of the
+long-term health of the module. We do not expect any more large breaking changes
+like these in future revisions, even before 1.0.0. This version will be the
+first version of the auth library that our client libraries start to use and
+depend on.
+
+### Features
+
+* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
+* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
+* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
+* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
+* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
+* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
+* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
+* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
+* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
+* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
+* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
+
+
+### Bug Fixes
+
+* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
+* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
+* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
+
+## 0.1.0 (2023-10-18)
+
+
+### Features
+
+* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
+* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
+* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
+* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
+* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
+* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
+* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
+* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
+* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
+* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
+* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
new file mode 100644
index 000000000..6fe4f0763
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -0,0 +1,40 @@
+# Google Auth Library for Go
+
+[](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+ [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+ package.
+- To create a authenticated HTTP client please see examples in the
+ [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+ package.
+- To create a authenticated gRPC connection please see examples in the
+ [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+ package.
+- To create an ID token please see examples in the
+ [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+ package.
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
new file mode 100644
index 000000000..cd5e98868
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -0,0 +1,618 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
+package auth
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // Parameter keys for AuthCodeURL method to support PKCE.
+ codeChallengeKey = "code_challenge"
+ codeChallengeMethodKey = "code_challenge_method"
+
+ // Parameter key for Exchange method to support PKCE.
+ codeVerifierKey = "code_verifier"
+
+ // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
+ // so we give it 15 seconds to refresh it's cache before attempting to refresh a token.
+ defaultExpiryDelta = 225 * time.Second
+
+ universeDomainDefault = "googleapis.com"
+)
+
+// tokenState represents different states for a [Token].
+type tokenState int
+
+const (
+ // fresh indicates that the [Token] is valid. It is not expired or close to
+ // expired, or the token has no expiry.
+ fresh tokenState = iota
+ // stale indicates that the [Token] is close to expired, and should be
+ // refreshed. The token can be used normally.
+ stale
+ // invalid indicates that the [Token] is expired or invalid. The token
+ // cannot be used for a normal operation.
+ invalid
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
+
+ // for testing
+ timeNow = time.Now
+)
+
+// TokenProvider specifies an interface for anything that can return a token.
+type TokenProvider interface {
+ // Token returns a Token or an error.
+ // The Token returned must be safe to use
+ // concurrently.
+ // The returned Token must not be modified.
+ // The context provided must be sent along to any requests that are made in
+ // the implementing code.
+ Token(context.Context) (*Token, error)
+}
+
+// Token holds the credential token used to authorized requests. All fields are
+// considered read-only.
+type Token struct {
+ // Value is the token used to authorize requests. It is usually an access
+ // token but may be other types of tokens such as ID tokens in some flows.
+ Value string
+ // Type is the type of token Value is. If uninitialized, it should be
+ // assumed to be a "Bearer" token.
+ Type string
+ // Expiry is the time the token is set to expire.
+ Expiry time.Time
+ // Metadata may include, but is not limited to, the body of the token
+ // response returned by the server.
+ Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
+}
+
+// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not
+// expired. A token is considered expired if [Token.Expiry] has passed or will
+// pass in the next 225 seconds.
+func (t *Token) IsValid() bool {
+ return t.isValidWithEarlyExpiry(defaultExpiryDelta)
+}
+
+// MetadataString is a convenience method for accessing string values in the
+// token's metadata. Returns an empty string if the metadata is nil or the value
+// for the given key cannot be cast to a string.
+func (t *Token) MetadataString(k string) string {
+ if t.Metadata == nil {
+ return ""
+ }
+ s, ok := t.Metadata[k].(string)
+ if !ok {
+ return ""
+ }
+ return s
+}
+
+func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
+ if t.isEmpty() {
+ return false
+ }
+ if t.Expiry.IsZero() {
+ return true
+ }
+ return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
+}
+
+func (t *Token) isEmpty() bool {
+ return t == nil || t.Value == ""
+}
+
+// Credentials holds Google credentials, including
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
+type Credentials struct {
+ json []byte
+ projectID CredentialsPropertyProvider
+ quotaProjectID CredentialsPropertyProvider
+ // universeDomain is the default service domain for a given Cloud universe.
+ universeDomain CredentialsPropertyProvider
+
+ TokenProvider
+}
+
+// JSON returns the bytes associated with the the file used to source
+// credentials if one was used.
+func (c *Credentials) JSON() []byte {
+ return c.json
+}
+
+// ProjectID returns the associated project ID from the underlying file or
+// environment.
+func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
+ if c.projectID == nil {
+ return internal.GetProjectID(c.json, ""), nil
+ }
+ v, err := c.projectID.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ return internal.GetProjectID(c.json, v), nil
+}
+
+// QuotaProjectID returns the associated quota project ID from the underlying
+// file or environment.
+func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
+ if c.quotaProjectID == nil {
+ return internal.GetQuotaProject(c.json, ""), nil
+ }
+ v, err := c.quotaProjectID.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ return internal.GetQuotaProject(c.json, v), nil
+}
+
+// UniverseDomain returns the default service domain for a given Cloud universe.
+// The default value is "googleapis.com".
+func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
+ if c.universeDomain == nil {
+ return universeDomainDefault, nil
+ }
+ v, err := c.universeDomain.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ if v == "" {
+ return universeDomainDefault, nil
+ }
+ return v, err
+}
+
+// CredentialsPropertyProvider provides an implementation to fetch a property
+// value for [Credentials].
+type CredentialsPropertyProvider interface {
+ GetProperty(context.Context) (string, error)
+}
+
+// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
+// functions as a [CredentialsPropertyProvider].
+type CredentialsPropertyFunc func(context.Context) (string, error)
+
+// GetProperty loads the properly value provided the given context.
+func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
+ return p(ctx)
+}
+
+// CredentialsOptions are used to configure [Credentials].
+type CredentialsOptions struct {
+ // TokenProvider is a means of sourcing a token for the credentials. Required.
+ TokenProvider TokenProvider
+ // JSON is the raw contents of the credentials file if sourced from a file.
+ JSON []byte
+ // ProjectIDProvider resolves the project ID associated with the
+ // credentials.
+ ProjectIDProvider CredentialsPropertyProvider
+ // QuotaProjectIDProvider resolves the quota project ID associated with the
+ // credentials.
+ QuotaProjectIDProvider CredentialsPropertyProvider
+ // UniverseDomainProvider resolves the universe domain with the credentials.
+ UniverseDomainProvider CredentialsPropertyProvider
+}
+
+// NewCredentials returns new [Credentials] from the provided options.
+func NewCredentials(opts *CredentialsOptions) *Credentials {
+ creds := &Credentials{
+ TokenProvider: opts.TokenProvider,
+ json: opts.JSON,
+ projectID: opts.ProjectIDProvider,
+ quotaProjectID: opts.QuotaProjectIDProvider,
+ universeDomain: opts.UniverseDomainProvider,
+ }
+
+ return creds
+}
+
+// CachedTokenProviderOptions provides options for configuring a cached
+// [TokenProvider].
+type CachedTokenProviderOptions struct {
+ // DisableAutoRefresh makes the TokenProvider always return the same token,
+ // even if it is expired. The default is false. Optional.
+ DisableAutoRefresh bool
+ // ExpireEarly configures the amount of time before a token expires, that it
+ // should be refreshed. If unset, the default value is 3 minutes and 45
+ // seconds. Optional.
+ ExpireEarly time.Duration
+ // DisableAsyncRefresh configures a synchronous workflow that refreshes
+ // tokens in a blocking manner. The default is false. Optional.
+ DisableAsyncRefresh bool
+}
+
+func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
+ if ctpo == nil {
+ return true
+ }
+ return !ctpo.DisableAutoRefresh
+}
+
+func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
+ if ctpo == nil || ctpo.ExpireEarly == 0 {
+ return defaultExpiryDelta
+ }
+ return ctpo.ExpireEarly
+}
+
+func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
+ if ctpo == nil {
+ return false
+ }
+ return ctpo.DisableAsyncRefresh
+}
+
+// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
+// by the underlying provider. By default it will refresh tokens asynchronously
+// a few minutes before they expire.
+func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
+ if ctp, ok := tp.(*cachedTokenProvider); ok {
+ return ctp
+ }
+ return &cachedTokenProvider{
+ tp: tp,
+ autoRefresh: opts.autoRefresh(),
+ expireEarly: opts.expireEarly(),
+ blockingRefresh: opts.blockingRefresh(),
+ }
+}
+
+type cachedTokenProvider struct {
+ tp TokenProvider
+ autoRefresh bool
+ expireEarly time.Duration
+ blockingRefresh bool
+
+ mu sync.Mutex
+ cachedToken *Token
+ // isRefreshRunning ensures that the non-blocking refresh will only be
+ // attempted once, even if multiple callers enter the Token method.
+ isRefreshRunning bool
+ // isRefreshErr ensures that the non-blocking refresh will only be attempted
+ // once per refresh window if an error is encountered.
+ isRefreshErr bool
+}
+
+func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
+ if c.blockingRefresh {
+ return c.tokenBlocking(ctx)
+ }
+ return c.tokenNonBlocking(ctx)
+}
+
+func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) {
+ switch c.tokenState() {
+ case fresh:
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.cachedToken, nil
+ case stale:
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
+ // Return the stale token immediately to not block customer requests to Cloud services.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.cachedToken, nil
+ default: // invalid
+ return c.tokenBlocking(ctx)
+ }
+}
+
+// tokenState reports the token's validity.
+func (c *cachedTokenProvider) tokenState() tokenState {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ t := c.cachedToken
+ now := timeNow()
+ if t == nil || t.Value == "" {
+ return invalid
+ } else if t.Expiry.IsZero() {
+ return fresh
+ } else if now.After(t.Expiry.Round(0)) {
+ return invalid
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ return stale
+ }
+ return fresh
+}
+
+// tokenAsync uses a bool to ensure that only one non-blocking token refresh
+// happens at a time, even if multiple callers have entered this function
+// concurrently. This avoids creating an arbitrary number of concurrent
+// goroutines. Retries should be attempted and managed within the Token method.
+// If the refresh attempt fails, no further attempts are made until the refresh
+// window expires and the token enters the invalid state, at which point the
+// blocking call to Token should likely return the same error on the main goroutine.
+func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
+ fn := func() {
+ c.mu.Lock()
+ c.isRefreshRunning = true
+ c.mu.Unlock()
+ t, err := c.tp.Token(ctx)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshRunning = false
+ if err != nil {
+ // Discard errors from the non-blocking refresh, but prevent further
+ // attempts.
+ c.isRefreshErr = true
+ return
+ }
+ c.cachedToken = t
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if !c.isRefreshRunning && !c.isRefreshErr {
+ go fn()
+ }
+}
+
+func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshErr = false
+ if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) {
+ return c.cachedToken, nil
+ }
+ t, err := c.tp.Token(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.cachedToken = t
+ return t, nil
+}
+
+// Error is a error associated with retrieving a [Token]. It can hold useful
+// additional details for debugging.
+type Error struct {
+ // Response is the HTTP response associated with error. The body will always
+ // be already closed and consumed.
+ Response *http.Response
+ // Body is the HTTP response body.
+ Body []byte
+ // Err is the underlying wrapped error.
+ Err error
+
+ // code returned in the token response
+ code string
+ // description returned in the token response
+ description string
+ // uri returned in the token response
+ uri string
+}
+
+func (e *Error) Error() string {
+ if e.code != "" {
+ s := fmt.Sprintf("auth: %q", e.code)
+ if e.description != "" {
+ s += fmt.Sprintf(" %q", e.description)
+ }
+ if e.uri != "" {
+ s += fmt.Sprintf(" %q", e.uri)
+ }
+ return s
+ }
+ return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
+}
+
+// Temporary returns true if the error is considered temporary and may be able
+// to be retried.
+func (e *Error) Temporary() bool {
+ if e.Response == nil {
+ return false
+ }
+ sc := e.Response.StatusCode
+ return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
+}
+
+func (e *Error) Unwrap() error {
+ return e.Err
+}
+
+// Style describes how the token endpoint wants to receive the ClientID and
+// ClientSecret.
+type Style int
+
+const (
+ // StyleUnknown means the value has not been initiated. Sending this in
+ // a request will cause the token exchange to fail.
+ StyleUnknown Style = iota
+ // StyleInParams sends client info in the body of a POST request.
+ StyleInParams
+ // StyleInHeader sends client info using Basic Authorization header.
+ StyleInHeader
+)
+
+// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
+type Options2LO struct {
+ // Email is the OAuth2 client ID. This value is set as the "iss" in the
+ // JWT.
+ Email string
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. It is used to sign
+ // the JWT created.
+ PrivateKey []byte
+ // TokenURL is th URL the JWT is sent to. Required.
+ TokenURL string
+ // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
+ // "kid" in the JWT header. Optional.
+ PrivateKeyID string
+ // Subject is the used for to impersonate a user. It is used as the "sub" in
+ // the JWT.m Optional.
+ Subject string
+ // Scopes specifies requested permissions for the token. Optional.
+ Scopes []string
+ // Expires specifies the lifetime of the token. Optional.
+ Expires time.Duration
+ // Audience specifies the "aud" in the JWT. Optional.
+ Audience string
+ // PrivateClaims allows specifying any custom claims for the JWT. Optional.
+ PrivateClaims map[string]interface{}
+
+ // Client is the client to be used to make the underlying token requests.
+ // Optional.
+ Client *http.Client
+ // UseIDToken requests that the token returned be an ID token if one is
+ // returned from the server. Optional.
+ UseIDToken bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *Options2LO) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+func (o *Options2LO) validate() error {
+ if o == nil {
+ return errors.New("auth: options must be provided")
+ }
+ if o.Email == "" {
+ return errors.New("auth: email must be provided")
+ }
+ if len(o.PrivateKey) == 0 {
+ return errors.New("auth: private key must be provided")
+ }
+ if o.TokenURL == "" {
+ return errors.New("auth: token URL must be provided")
+ }
+ return nil
+}
+
+// New2LOTokenProvider returns a [TokenProvider] from the provided options.
+func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil
+}
+
+type tokenProvider2LO struct {
+ opts *Options2LO
+ Client *http.Client
+ logger *slog.Logger
+}
+
+func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
+ pk, err := internal.ParseKey(tp.opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ claimSet := &jwt.Claims{
+ Iss: tp.opts.Email,
+ Scope: strings.Join(tp.opts.Scopes, " "),
+ Aud: tp.opts.TokenURL,
+ AdditionalClaims: tp.opts.PrivateClaims,
+ Sub: tp.opts.Subject,
+ }
+ if t := tp.opts.Expires; t > 0 {
+ claimSet.Exp = time.Now().Add(t).Unix()
+ }
+ if aud := tp.opts.Audience; aud != "" {
+ claimSet.Aud = aud
+ }
+ h := *defaultHeader
+ h.KeyID = tp.opts.PrivateKeyID
+ payload, err := jwt.EncodeJWS(&h, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ resp, body, err := internal.DoRequest(tp.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
+ }
+ tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return nil, &Error{
+ Response: resp,
+ Body: body,
+ }
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"`
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
+ }
+ token := &Token{
+ Value: tokenRes.AccessToken,
+ Type: tokenRes.TokenType,
+ }
+ token.Metadata = make(map[string]interface{})
+ json.Unmarshal(body, &token.Metadata) // no error checks for optional fields
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jwt.DecodeJWS(v)
+ if err != nil {
+ return nil, fmt.Errorf("auth: error decoding JWT token: %w", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ if tp.opts.UseIDToken {
+ if tokenRes.IDToken == "" {
+ return nil, fmt.Errorf("auth: response doesn't have JWT token")
+ }
+ token.Value = tokenRes.IDToken
+ }
+ return token, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
new file mode 100644
index 000000000..e4a8078f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -0,0 +1,102 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/compute/metadata"
+)
+
+var (
+ computeTokenMetadata = map[string]interface{}{
+ "auth.google.tokenSource": "compute-metadata",
+ "auth.google.serviceAccount": "default",
+ }
+ computeTokenURI = "instance/service-accounts/default/token"
+)
+
+// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
+// uses the metadata service to retrieve tokens.
+func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider {
+ return auth.NewCachedTokenProvider(&computeProvider{
+ scopes: opts.Scopes,
+ client: client,
+ tokenBindingType: opts.TokenBindingType,
+ }, &auth.CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenRefresh,
+ DisableAsyncRefresh: opts.DisableAsyncRefresh,
+ })
+}
+
+// computeProvider fetches tokens from the google cloud metadata service.
+type computeProvider struct {
+ scopes []string
+ client *metadata.Client
+ tokenBindingType TokenBindingType
+}
+
+type metadataTokenResp struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+}
+
+func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
+ tokenURI, err := url.Parse(computeTokenURI)
+ if err != nil {
+ return nil, err
+ }
+ hasScopes := len(cs.scopes) > 0
+ if hasScopes || cs.tokenBindingType != NoBinding {
+ v := url.Values{}
+ if hasScopes {
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ }
+ switch cs.tokenBindingType {
+ case MTLSHardBinding:
+ v.Set("transport", "mtls")
+ v.Set("binding-enforcement", "on")
+ case ALTSHardBinding:
+ v.Set("transport", "alts")
+ }
+ tokenURI.RawQuery = v.Encode()
+ }
+ tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String())
+ if err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ var res metadataTokenResp
+ if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
+ return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, errors.New("credentials: incomplete token received from metadata")
+ }
+ return &auth.Token{
+ Value: res.AccessToken,
+ Type: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ Metadata: computeTokenMetadata,
+ }, nil
+
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
new file mode 100644
index 000000000..d8f7d9614
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -0,0 +1,316 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/compute/metadata"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow.
+ jwtTokenURL = "https://oauth2.googleapis.com/token"
+
+ // Google's OAuth 2.0 default endpoints.
+ googleAuthURL = "https://accounts.google.com/o/oauth2/auth"
+ googleTokenURL = "https://oauth2.googleapis.com/token"
+
+ // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint.
+ GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token"
+
+ // Help on default credentials
+ adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
+)
+
+var (
+ // for testing
+ allowOnGCECheck = true
+)
+
+// TokenBindingType specifies the type of binding used when requesting a token
+// whether to request a hard-bound token using mTLS or an instance identity
+// bound token using ALTS.
+type TokenBindingType int
+
+const (
+ // NoBinding specifies that requested tokens are not required to have a
+ // binding. This is the default option.
+ NoBinding TokenBindingType = iota
+ // MTLSHardBinding specifies that a hard-bound token should be requested
+ // using an mTLS with S2A channel.
+ MTLSHardBinding
+ // ALTSHardBinding specifies that an instance identity bound token should
+ // be requested using an ALTS channel.
+ ALTSHardBinding
+)
+
+// OnGCE reports whether this process is running in Google Cloud.
+func OnGCE() bool {
+ // TODO(codyoss): once all libs use this auth lib move metadata check here
+ return allowOnGCECheck && metadata.OnGCE()
+}
+
+// DetectDefault searches for "Application Default Credentials" and returns
+// a credential based on the [DetectOptions] provided.
+//
+// It looks for credentials in the following places, preferring the first
+// location found:
+//
+// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
+// environment variable. For workload identity federation, refer to
+// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
+// on how to generate the JSON configuration file for on-prem/non-Google
+// cloud platforms.
+// - A JSON file in a location known to the gcloud command-line tool. On
+// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On
+// other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// - On Google Compute Engine, Google App Engine standard second generation
+// runtimes, and Google App Engine flexible environment, it fetches
+// credentials from the metadata server.
+func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ if len(opts.CredentialsJSON) > 0 {
+ return readCredentialsFileJSON(opts.CredentialsJSON, opts)
+ }
+ if opts.CredentialsFile != "" {
+ return readCredentialsFile(opts.CredentialsFile, opts)
+ }
+ if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" {
+ creds, err := readCredentialsFile(filename, opts)
+ if err != nil {
+ return nil, err
+ }
+ return creds, nil
+ }
+
+ fileName := credsfile.GetWellKnownFileName()
+ if b, err := os.ReadFile(fileName); err == nil {
+ return readCredentialsFileJSON(b, opts)
+ }
+
+ if OnGCE() {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: opts.logger(),
+ })
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: computeTokenProvider(opts, metadataClient),
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return metadataClient.ProjectIDWithContext(ctx)
+ }),
+ UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
+ MetadataClient: metadataClient,
+ },
+ }), nil
+ }
+
+ return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL)
+}
+
+// DetectOptions provides configuration for [DetectDefault].
+type DetectOptions struct {
+ // Scopes that credentials tokens should have. Example:
+ // https://www.googleapis.com/auth/cloud-platform. Required if Audience is
+ // not provided.
+ Scopes []string
+ // TokenBindingType specifies the type of binding used when requesting a
+ // token whether to request a hard-bound token using mTLS or an instance
+ // identity bound token using ALTS. Optional.
+ TokenBindingType TokenBindingType
+ // Audience that credentials tokens should have. Only applicable for 2LO
+ // flows with service accounts. If specified, scopes should not be provided.
+ Audience string
+ // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
+ // Optional.
+ Subject string
+ // EarlyTokenRefresh configures how early before a token expires that it
+ // should be refreshed. Once the token’s time until expiration has entered
+ // this refresh window the token is considered valid but stale. If unset,
+ // the default value is 3 minutes and 45 seconds. Optional.
+ EarlyTokenRefresh time.Duration
+ // DisableAsyncRefresh configures a synchronous workflow that refreshes
+ // stale tokens while blocking. The default is false. Optional.
+ DisableAsyncRefresh bool
+ // AuthHandlerOptions configures an authorization handler and other options
+ // for 3LO flows. It is required, and only used, for client credential
+ // flows.
+ AuthHandlerOptions *auth.AuthorizationHandlerOptions
+ // TokenURL allows to set the token endpoint for user credential flows. If
+ // unset the default value is: https://oauth2.googleapis.com/token.
+ // Optional.
+ TokenURL string
+ // STSAudience is the audience sent to when retrieving an STS token.
+ // Currently this only used for GDCH auth flow, for which it is required.
+ STSAudience string
+ // CredentialsFile overrides detection logic and sources a credential file
+ // from the provided filepath. If provided, CredentialsJSON must not be.
+ // Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
+ CredentialsFile string
+ // CredentialsJSON overrides detection logic and uses the JSON bytes as the
+ // source for the credential. If provided, CredentialsFile must not be.
+ // Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
+ CredentialsJSON []byte
+ // UseSelfSignedJWT directs service account based credentials to create a
+ // self-signed JWT with the private key found in the file, skipping any
+ // network requests that would normally be made. Optional.
+ UseSelfSignedJWT bool
+ // Client configures the underlying client used to make network requests
+ // when fetching tokens. Optional.
+ Client *http.Client
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // The default value is "googleapis.com". This option is ignored for
+ // authentication flows that do not support universe domain. Optional.
+ UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *DetectOptions) validate() error {
+ if o == nil {
+ return errors.New("credentials: options must be provided")
+ }
+ if len(o.Scopes) > 0 && o.Audience != "" {
+ return errors.New("credentials: both scopes and audience were provided")
+ }
+ if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
+ return errors.New("credentials: both credentials file and JSON were provided")
+ }
+ return nil
+}
+
+func (o *DetectOptions) tokenURL() string {
+ if o.TokenURL != "" {
+ return o.TokenURL
+ }
+ return googleTokenURL
+}
+
+func (o *DetectOptions) scopes() []string {
+ scopes := make([]string, len(o.Scopes))
+ copy(scopes, o.Scopes)
+ return scopes
+}
+
+func (o *DetectOptions) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+func (o *DetectOptions) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return readCredentialsFileJSON(b, opts)
+}
+
+func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+ // attempt to parse jsonData as a Google Developers Console client_credentials.json.
+ config := clientCredConfigFromJSON(b, opts)
+ if config != nil {
+ if config.AuthHandlerOpts == nil {
+ return nil, errors.New("credentials: auth handler must be specified for this credential filetype")
+ }
+ tp, err := auth.New3LOTokenProvider(config)
+ if err != nil {
+ return nil, err
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: tp,
+ JSON: b,
+ }), nil
+ }
+ return fileCredentials(b, opts)
+}
+
+func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
+ var creds credsfile.ClientCredentialsFile
+ var c *credsfile.Config3LO
+ if err := json.Unmarshal(b, &creds); err != nil {
+ return nil
+ }
+ switch {
+ case creds.Web != nil:
+ c = creds.Web
+ case creds.Installed != nil:
+ c = creds.Installed
+ default:
+ return nil
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil
+ }
+ var handleOpts *auth.AuthorizationHandlerOptions
+ if opts.AuthHandlerOptions != nil {
+ handleOpts = &auth.AuthorizationHandlerOptions{
+ Handler: opts.AuthHandlerOptions.Handler,
+ State: opts.AuthHandlerOptions.State,
+ PKCEOpts: opts.AuthHandlerOptions.PKCEOpts,
+ }
+ }
+ return &auth.Options3LO{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: opts.scopes(),
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ EarlyTokenExpiry: opts.EarlyTokenRefresh,
+ AuthHandlerOpts: handleOpts,
+ // TODO(codyoss): refactor this out. We need to add in auto-detection
+ // for this use case.
+ AuthStyle: auth.StyleInParams,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go
new file mode 100644
index 000000000..1dbb2866b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs. It supports the Web server flow,
+// client-side credentials, service accounts, Google Compute Engine service
+// accounts, Google App Engine service accounts and workload identity federation
+// from non-Google cloud platforms.
+//
+// A brief overview of the package follows. For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+// For more information on using workload identity federation, refer to
+// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
+//
+// # Credentials
+//
+// The [cloud.google.com/go/auth.Credentials] type represents Google
+// credentials, including Application Default Credentials.
+//
+// Use [DetectDefault] to obtain Application Default Credentials.
+//
+// Application Default Credentials support workload identity federation to
+// access Google Cloud resources from non-Google Cloud platforms including Amazon
+// Web Services (AWS), Microsoft Azure or any identity provider that supports
+// OpenID Connect (OIDC). Workload identity federation is recommended for
+// non-Google Cloud environments as it avoids the need to download, manage, and
+// store service account private keys locally.
+//
+// # Workforce Identity Federation
+//
+// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount].
+package credentials
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
new file mode 100644
index 000000000..e5243e6cf
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -0,0 +1,231 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "errors"
+ "fmt"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/externalaccount"
+ "cloud.google.com/go/auth/credentials/internal/externalaccountuser"
+ "cloud.google.com/go/auth/credentials/internal/gdch"
+ "cloud.google.com/go/auth/credentials/internal/impersonate"
+ internalauth "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+)
+
+func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+ fileType, err := credsfile.ParseFileType(b)
+ if err != nil {
+ return nil, err
+ }
+
+ var projectID, universeDomain string
+ var tp auth.TokenProvider
+ switch fileType {
+ case credsfile.ServiceAccountKey:
+ f, err := credsfile.ParseServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ projectID = f.ProjectID
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.UserCredentialsKey:
+ f, err := credsfile.ParseUserCredentials(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleUserCredential(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = f.UniverseDomain
+ case credsfile.ExternalAccountKey:
+ f, err := credsfile.ParseExternalAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleExternalAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.ExternalAccountAuthorizedUserKey:
+ f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleExternalAccountAuthorizedUser(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = f.UniverseDomain
+ case credsfile.ImpersonatedServiceAccountKey:
+ f, err := credsfile.ParseImpersonatedServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleImpersonatedServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.GDCHServiceAccountKey:
+ f, err := credsfile.ParseGDCHServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleGDCHServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ projectID = f.Project
+ universeDomain = f.UniverseDomain
+ default:
+ return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType)
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenRefresh,
+ }),
+ JSON: b,
+ ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
+ // TODO(codyoss): only set quota project here if there was a user override
+ UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
+ }), nil
+}
+
+// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to
+// support configuring universe-specific credentials in code. Auth flows
+// unsupported for universe domain should not use this func, but should instead
+// simply set the file universe domain on the credentials.
+func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string {
+ if optsUniverseDomain != "" {
+ return optsUniverseDomain
+ }
+ return fileUniverseDomain
+}
+
+func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ if opts.UseSelfSignedJWT {
+ return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
+ }
+ opts2LO := &auth.Options2LO{
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: opts.scopes(),
+ TokenURL: f.TokenURL,
+ Subject: opts.Subject,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ if opts2LO.TokenURL == "" {
+ opts2LO.TokenURL = jwtTokenURL
+ }
+ return auth.New2LOTokenProvider(opts2LO)
+}
+
+func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ opts3LO := &auth.Options3LO{
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: opts.scopes(),
+ AuthURL: googleAuthURL,
+ TokenURL: opts.tokenURL(),
+ AuthStyle: auth.StyleInParams,
+ EarlyTokenExpiry: opts.EarlyTokenRefresh,
+ RefreshToken: f.RefreshToken,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ return auth.New3LOTokenProvider(opts3LO)
+}
+
+func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ externalOpts := &externalaccount.Options{
+ Audience: f.Audience,
+ SubjectTokenType: f.SubjectTokenType,
+ TokenURL: f.TokenURL,
+ TokenInfoURL: f.TokenInfoURL,
+ ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
+ ClientSecret: f.ClientSecret,
+ ClientID: f.ClientID,
+ CredentialSource: f.CredentialSource,
+ QuotaProjectID: f.QuotaProjectID,
+ Scopes: opts.scopes(),
+ WorkforcePoolUserProject: f.WorkforcePoolUserProject,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ IsDefaultClient: opts.Client == nil,
+ }
+ if f.ServiceAccountImpersonation != nil {
+ externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
+ }
+ return externalaccount.NewTokenProvider(externalOpts)
+}
+
+func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ externalOpts := &externalaccountuser.Options{
+ Audience: f.Audience,
+ RefreshToken: f.RefreshToken,
+ TokenURL: f.TokenURL,
+ TokenInfoURL: f.TokenInfoURL,
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: opts.scopes(),
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ return externalaccountuser.NewTokenProvider(externalOpts)
+}
+
+func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil {
+ return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
+ }
+
+ tp, err := fileCredentials(f.CredSource, opts)
+ if err != nil {
+ return nil, err
+ }
+ return impersonate.NewTokenProvider(&impersonate.Options{
+ URL: f.ServiceAccountImpersonationURL,
+ Scopes: opts.scopes(),
+ Tp: tp,
+ Delegates: f.Delegates,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ })
+}
+
+func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ return gdch.NewTokenProvider(f, &gdch.Options{
+ STSAudience: opts.STSAudience,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ })
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
new file mode 100644
index 000000000..9ecd1f64b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -0,0 +1,531 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ // getenv aliases os.Getenv for testing
+ getenv = os.Getenv
+)
+
+const (
+ // AWS Signature Version 4 signing algorithm identifier.
+ awsAlgorithm = "AWS4-HMAC-SHA256"
+
+ // The termination string for the AWS credential scope value as defined in
+ // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
+ awsRequestType = "aws4_request"
+
+ // The AWS authorization header name for the security session token if available.
+ awsSecurityTokenHeader = "x-amz-security-token"
+
+ // The name of the header containing the session token for metadata endpoint calls
+ awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
+
+ awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
+
+ awsIMDSv2SessionTTL = "300"
+
+ // The AWS authorization header name for the auto-generated date.
+ awsDateHeader = "x-amz-date"
+
+ defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+
+ // Supported AWS configuration environment variables.
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+ awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+ awsRegionEnvVar = "AWS_REGION"
+ awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+ awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+ awsTimeFormatLong = "20060102T150405Z"
+ awsTimeFormatShort = "20060102"
+ awsProviderType = "aws"
+)
+
+type awsSubjectProvider struct {
+ EnvironmentID string
+ RegionURL string
+ RegionalCredVerificationURL string
+ CredVerificationURL string
+ IMDSv2SessionTokenURL string
+ TargetResource string
+ requestSigner *awsRequestSigner
+ region string
+ securityCredentialsProvider AwsSecurityCredentialsProvider
+ reqOpts *RequestOptions
+
+ Client *http.Client
+ logger *slog.Logger
+}
+
+func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ // Set Defaults
+ if sp.RegionalCredVerificationURL == "" {
+ sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
+ }
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
+ }
+ }
+
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
+ // Generate the signed request to AWS STS GetCallerIdentity API.
+ // Use the required regional endpoint. Otherwise, the request will fail.
+ req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
+ if err != nil {
+ return "", err
+ }
+ // The full, canonical resource name of the workload identity pool
+ // provider, with or without the HTTPS prefix.
+ // Including this header as part of the signature is recommended to
+ // ensure data integrity.
+ if sp.TargetResource != "" {
+ req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
+ }
+ sp.requestSigner.signRequest(req)
+
+ /*
+ The GCP STS endpoint expects the headers to be formatted as:
+ # [
+ # {key: 'x-amz-date', value: '...'},
+ # {key: 'Authorization', value: '...'},
+ # ...
+ # ]
+ # And then serialized as:
+ # quote(json.dumps({
+ # url: '...',
+ # method: 'POST',
+ # headers: [{key: 'x-amz-date', value: '...'}, ...]
+ # }))
+ */
+
+ awsSignedReq := awsRequest{
+ URL: req.URL.String(),
+ Method: "POST",
+ }
+ for headerKey, headerList := range req.Header {
+ for _, headerValue := range headerList {
+ awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
+ Key: headerKey,
+ Value: headerValue,
+ })
+ }
+ }
+ sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
+ headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
+ if headerCompare == 0 {
+ return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
+ }
+ return headerCompare < 0
+ })
+
+ result, err := json.Marshal(awsSignedReq)
+ if err != nil {
+ return "", err
+ }
+ return url.QueryEscape(string(result)), nil
+}
+
+func (sp *awsSubjectProvider) providerType() string {
+ if sp.securityCredentialsProvider != nil {
+ return programmaticProviderType
+ }
+ return awsProviderType
+}
+
+func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
+ if sp.IMDSv2SessionTokenURL == "" {
+ return "", nil
+ }
+ req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
+ if err != nil {
+ return "", err
+ }
+ req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
+
+ sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
+ }
+ return string(body), nil
+}
+
+func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
+ if sp.securityCredentialsProvider != nil {
+ return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
+ }
+ if canRetrieveRegionFromEnvironment() {
+ if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
+ return envAwsRegion, nil
+ }
+ return getenv(awsDefaultRegionEnvVar), nil
+ }
+
+ if sp.RegionURL == "" {
+ return "", errors.New("credentials: unable to determine AWS region")
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
+ if err != nil {
+ return "", err
+ }
+
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+ sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
+ }
+
+ // This endpoint will return the region in format: us-east-2b.
+ // Only the us-east-2 part should be used.
+ bodyLen := len(body)
+ if bodyLen == 0 {
+ return "", nil
+ }
+ return string(body[:bodyLen-1]), nil
+}
+
+func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
+ if sp.securityCredentialsProvider != nil {
+ return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
+ }
+ if canRetrieveSecurityCredentialFromEnvironment() {
+ return &AwsSecurityCredentials{
+ AccessKeyID: getenv(awsAccessKeyIDEnvVar),
+ SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
+ SessionToken: getenv(awsSessionTokenEnvVar),
+ }, nil
+ }
+
+ roleName, err := sp.getMetadataRoleName(ctx, headers)
+ if err != nil {
+ return
+ }
+ credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
+ if err != nil {
+ return
+ }
+
+ if credentials.AccessKeyID == "" {
+ return result, errors.New("credentials: missing AccessKeyId credential")
+ }
+ if credentials.SecretAccessKey == "" {
+ return result, errors.New("credentials: missing SecretAccessKey credential")
+ }
+
+ return credentials, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
+ var result *AwsSecurityCredentials
+
+ req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
+ if err != nil {
+ return result, err
+ }
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+ sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return result, err
+ }
+ sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
+ }
+ if err := json.Unmarshal(body, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
+ if sp.CredVerificationURL == "" {
+ return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
+ }
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
+ if err != nil {
+ return "", err
+ }
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+
+ sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
+ }
+ return string(body), nil
+}
+
+// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature.
+type awsRequestSigner struct {
+ RegionName string
+ AwsSecurityCredentials *AwsSecurityCredentials
+}
+
+// signRequest adds the appropriate headers to an http.Request
+// or returns an error if something prevented this.
+func (rs *awsRequestSigner) signRequest(req *http.Request) error {
+ // req is assumed non-nil
+ signedRequest := cloneRequest(req)
+ timestamp := Now()
+ signedRequest.Header.Set("host", requestHost(req))
+ if rs.AwsSecurityCredentials.SessionToken != "" {
+ signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
+ }
+ if signedRequest.Header.Get("date") == "" {
+ signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong))
+ }
+ authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp)
+ if err != nil {
+ return err
+ }
+ signedRequest.Header.Set("Authorization", authorizationCode)
+ req.Header = signedRequest.Header
+ return nil
+}
+
+func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) {
+ canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req)
+ dateStamp := timestamp.Format(awsTimeFormatShort)
+ serviceName := ""
+
+ if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 {
+ serviceName = splitHost[0]
+ }
+ credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/")
+ requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData)
+ if err != nil {
+ return "", err
+ }
+ requestHash, err := getSha256([]byte(requestString))
+ if err != nil {
+ return "", err
+ }
+
+ stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n")
+ signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey)
+ for _, signingInput := range []string{
+ dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign,
+ } {
+ signingKey, err = getHmacSha256(signingKey, []byte(signingInput))
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil
+}
+
+func getSha256(input []byte) (string, error) {
+ hash := sha256.New()
+ if _, err := hash.Write(input); err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+func getHmacSha256(key, input []byte) ([]byte, error) {
+ hash := hmac.New(sha256.New, key)
+ if _, err := hash.Write(input); err != nil {
+ return nil, err
+ }
+ return hash.Sum(nil), nil
+}
+
+func cloneRequest(r *http.Request) *http.Request {
+ r2 := new(http.Request)
+ *r2 = *r
+ if r.Header != nil {
+ r2.Header = make(http.Header, len(r.Header))
+
+ // Find total number of values.
+ headerCount := 0
+ for _, headerValues := range r.Header {
+ headerCount += len(headerValues)
+ }
+ copiedHeaders := make([]string, headerCount) // shared backing array for headers' values
+
+ for headerKey, headerValues := range r.Header {
+ headerCount = copy(copiedHeaders, headerValues)
+ r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount]
+ copiedHeaders = copiedHeaders[headerCount:]
+ }
+ }
+ return r2
+}
+
+func canonicalPath(req *http.Request) string {
+ result := req.URL.EscapedPath()
+ if result == "" {
+ return "/"
+ }
+ return path.Clean(result)
+}
+
+func canonicalQuery(req *http.Request) string {
+ queryValues := req.URL.Query()
+ for queryKey := range queryValues {
+ sort.Strings(queryValues[queryKey])
+ }
+ return queryValues.Encode()
+}
+
+func canonicalHeaders(req *http.Request) (string, string) {
+ // Header keys need to be sorted alphabetically.
+ var headers []string
+ lowerCaseHeaders := make(http.Header)
+ for k, v := range req.Header {
+ k := strings.ToLower(k)
+ if _, ok := lowerCaseHeaders[k]; ok {
+ // include additional values
+ lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...)
+ } else {
+ headers = append(headers, k)
+ lowerCaseHeaders[k] = v
+ }
+ }
+ sort.Strings(headers)
+
+ var fullHeaders bytes.Buffer
+ for _, header := range headers {
+ headerValue := strings.Join(lowerCaseHeaders[header], ",")
+ fullHeaders.WriteString(header)
+ fullHeaders.WriteRune(':')
+ fullHeaders.WriteString(headerValue)
+ fullHeaders.WriteRune('\n')
+ }
+
+ return strings.Join(headers, ";"), fullHeaders.String()
+}
+
+func requestDataHash(req *http.Request) (string, error) {
+ var requestData []byte
+ if req.Body != nil {
+ requestBody, err := req.GetBody()
+ if err != nil {
+ return "", err
+ }
+ defer requestBody.Close()
+
+ requestData, err = internal.ReadAll(requestBody)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return getSha256(requestData)
+}
+
+func requestHost(req *http.Request) string {
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
+
+func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) {
+ dataHash, err := requestDataHash(req)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil
+}
+
+type awsRequestHeader struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+}
+
+type awsRequest struct {
+ URL string `json:"url"`
+ Method string `json:"method"`
+ Headers []awsRequestHeader `json:"headers"`
+}
+
+// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
+// required.
+func canRetrieveRegionFromEnvironment() bool {
+ return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != ""
+}
+
+// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
+func canRetrieveSecurityCredentialFromEnvironment() bool {
+ return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != ""
+}
+
+func (sp *awsSubjectProvider) shouldUseMetadataServer() bool {
+ return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
new file mode 100644
index 000000000..d5765c474
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
@@ -0,0 +1,284 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+)
+
+const (
+ executableSupportedMaxVersion = 1
+ executableDefaultTimeout = 30 * time.Second
+ executableSource = "response"
+ executableProviderType = "executable"
+ outputFileSource = "output file"
+
+ allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
+
+ jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
+ idTokenType = "urn:ietf:params:oauth:token-type:id_token"
+ saml2TokenType = "urn:ietf:params:oauth:token-type:saml2"
+)
+
+var (
+ serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`)
+)
+
+type nonCacheableError struct {
+ message string
+}
+
+func (nce nonCacheableError) Error() string {
+ return nce.message
+}
+
+// environment is a contract for testing
+type environment interface {
+ existingEnv() []string
+ getenv(string) string
+ run(ctx context.Context, command string, env []string) ([]byte, error)
+ now() time.Time
+}
+
+type runtimeEnvironment struct{}
+
+func (r runtimeEnvironment) existingEnv() []string {
+ return os.Environ()
+}
+func (r runtimeEnvironment) getenv(key string) string {
+ return os.Getenv(key)
+}
+func (r runtimeEnvironment) now() time.Time {
+ return time.Now().UTC()
+}
+
+func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) {
+ splitCommand := strings.Fields(command)
+ cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...)
+ cmd.Env = env
+
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ if ctx.Err() == context.DeadlineExceeded {
+ return nil, context.DeadlineExceeded
+ }
+ if exitError, ok := err.(*exec.ExitError); ok {
+ return nil, exitCodeError(exitError)
+ }
+ return nil, executableError(err)
+ }
+
+ bytesStdout := bytes.TrimSpace(stdout.Bytes())
+ if len(bytesStdout) > 0 {
+ return bytesStdout, nil
+ }
+ return bytes.TrimSpace(stderr.Bytes()), nil
+}
+
+type executableSubjectProvider struct {
+ Command string
+ Timeout time.Duration
+ OutputFile string
+ client *http.Client
+ opts *Options
+ env environment
+}
+
+type executableResponse struct {
+ Version int `json:"version,omitempty"`
+ Success *bool `json:"success,omitempty"`
+ TokenType string `json:"token_type,omitempty"`
+ ExpirationTime int64 `json:"expiration_time,omitempty"`
+ IDToken string `json:"id_token,omitempty"`
+ SamlResponse string `json:"saml_response,omitempty"`
+ Code string `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
+ var result executableResponse
+ if err := json.Unmarshal(response, &result); err != nil {
+ return "", jsonParsingError(source, string(response))
+ }
+ // Validate
+ if result.Version == 0 {
+ return "", missingFieldError(source, "version")
+ }
+ if result.Success == nil {
+ return "", missingFieldError(source, "success")
+ }
+ if !*result.Success {
+ if result.Code == "" || result.Message == "" {
+ return "", malformedFailureError()
+ }
+ return "", userDefinedError(result.Code, result.Message)
+ }
+ if result.Version > executableSupportedMaxVersion || result.Version < 0 {
+ return "", unsupportedVersionError(source, result.Version)
+ }
+ if result.ExpirationTime == 0 && sp.OutputFile != "" {
+ return "", missingFieldError(source, "expiration_time")
+ }
+ if result.TokenType == "" {
+ return "", missingFieldError(source, "token_type")
+ }
+ if result.ExpirationTime != 0 && result.ExpirationTime < now {
+ return "", tokenExpiredError()
+ }
+
+ switch result.TokenType {
+ case jwtTokenType, idTokenType:
+ if result.IDToken == "" {
+ return "", missingFieldError(source, "id_token")
+ }
+ return result.IDToken, nil
+ case saml2TokenType:
+ if result.SamlResponse == "" {
+ return "", missingFieldError(source, "saml_response")
+ }
+ return result.SamlResponse, nil
+ default:
+ return "", tokenTypeError(source)
+ }
+}
+
+func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil {
+ return token, err
+ }
+ return sp.getTokenFromExecutableCommand(ctx)
+}
+
+func (sp *executableSubjectProvider) providerType() string {
+ return executableProviderType
+}
+
+func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) {
+ if sp.OutputFile == "" {
+ // This ExecutableCredentialSource doesn't use an OutputFile.
+ return "", nil
+ }
+
+ file, err := os.Open(sp.OutputFile)
+ if err != nil {
+ // No OutputFile found. Hasn't been created yet, so skip it.
+ return "", nil
+ }
+ defer file.Close()
+
+ data, err := internal.ReadAll(file)
+ if err != nil || len(data) == 0 {
+ // Cachefile exists, but no data found. Get new credential.
+ return "", nil
+ }
+
+ token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix())
+ if err != nil {
+ if _, ok := err.(nonCacheableError); ok {
+ // If the cached token is expired we need a new token,
+ // and if the cache contains a failure, we need to try again.
+ return "", nil
+ }
+
+ // There was an error in the cached token, and the developer should be aware of it.
+ return "", err
+ }
+ // Token parsing succeeded. Use found token.
+ return token, nil
+}
+
+func (sp *executableSubjectProvider) executableEnvironment() []string {
+ result := sp.env.existingEnv()
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience))
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType))
+ result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0")
+ if sp.opts.ServiceAccountImpersonationURL != "" {
+ matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL)
+ if matches != nil {
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1]))
+ }
+ }
+ if sp.OutputFile != "" {
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile))
+ }
+ return result
+}
+
+func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) {
+ // For security reasons, we need our consumers to set this environment variable to allow executables to be run.
+ if sp.env.getenv(allowExecutablesEnvVar) != "1" {
+ return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
+ }
+
+ ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
+ defer cancel()
+
+ output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
+ if err != nil {
+ return "", err
+ }
+ return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
+}
+
+func missingFieldError(source, field string) error {
+ return fmt.Errorf("credentials: %q missing %q field", source, field)
+}
+
+func jsonParsingError(source, data string) error {
+ return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
+}
+
+func malformedFailureError() error {
+ return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"}
+}
+
+func userDefinedError(code, message string) error {
+ return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
+}
+
+func unsupportedVersionError(source string, version int) error {
+ return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
+}
+
+func tokenExpiredError() error {
+ return nonCacheableError{"credentials: the token returned by the executable is expired"}
+}
+
+func tokenTypeError(source string) error {
+ return fmt.Errorf("credentials: %v contains unsupported token type", source)
+}
+
+func exitCodeError(err *exec.ExitError) error {
+ return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
+}
+
+func executableError(err error) error {
+ return fmt.Errorf("credentials: executable command failed: %w", err)
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
new file mode 100644
index 000000000..a82206423
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -0,0 +1,428 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/impersonate"
+ "cloud.google.com/go/auth/credentials/internal/stsexchange"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ timeoutMinimum = 5 * time.Second
+ timeoutMaximum = 120 * time.Second
+
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
+ defaultUniverseDomain = "googleapis.com"
+)
+
+var (
+ // Now aliases time.Now for testing
+ Now = func() time.Time {
+ return time.Now().UTC()
+ }
+ validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+// Options stores the configuration for fetching tokens with external credentials.
+type Options struct {
+ // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+ // identity pool or the workforce pool and the provider identifier in that pool.
+ Audience string
+ // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec
+ // e.g. `urn:ietf:params:oauth:token-type:jwt`.
+ SubjectTokenType string
+ // TokenURL is the STS token exchange endpoint.
+ TokenURL string
+ // TokenInfoURL is the token_info endpoint used to retrieve the account related information (
+ // user attributes like account identifier, eg. email, username, uid, etc). This is
+ // needed for gCloud session account identification.
+ TokenInfoURL string
+ // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+ // required for workload identity pools when APIs to be accessed have not integrated with UberMint.
+ ServiceAccountImpersonationURL string
+ // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+ // token will be valid for.
+ ServiceAccountImpersonationLifetimeSeconds int
+ // ClientSecret is currently only required if token_info endpoint also
+ // needs to be called with the generated GCP access token. When provided, STS will be
+ // called with additional basic authentication using client_id as username and client_secret as password.
+ ClientSecret string
+ // ClientID is only required in conjunction with ClientSecret, as described above.
+ ClientID string
+ // CredentialSource contains the necessary information to retrieve the token itself, as well
+ // as some environmental information.
+ CredentialSource *credsfile.CredentialSource
+ // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+ // will set the x-goog-user-project which overrides the project associated with the credentials.
+ QuotaProjectID string
+ // Scopes contains the desired scopes for the returned access token.
+ Scopes []string
+ // WorkforcePoolUserProject should be set when it is a workforce pool and
+ // not a workload identity pool. The underlying principal must still have
+ // serviceusage.services.use IAM permission to use the project for
+ // billing/quota. Optional.
+ WorkforcePoolUserProject string
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // This value will be used in the default STS token URL. The default value
+ // is "googleapis.com". It will not be used if TokenURL is set. Optional.
+ UniverseDomain string
+ // SubjectTokenProvider is an optional token provider for OIDC/SAML
+ // credentials. One of SubjectTokenProvider, AWSSecurityCredentialProvider
+ // or CredentialSource must be provided. Optional.
+ SubjectTokenProvider SubjectTokenProvider
+ // AwsSecurityCredentialsProvider is an AWS Security Credential provider
+ // for AWS credentials. One of SubjectTokenProvider,
+ // AWSSecurityCredentialProvider or CredentialSource must be provided. Optional.
+ AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
+ // Client for token request.
+ Client *http.Client
+ // IsDefaultClient marks whether the client passed in is a default client that can be overriden.
+ // This is important for X509 credentials which should create a new client if the default was used
+ // but should respect a client explicitly passed in by the user.
+ IsDefaultClient bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+// SubjectTokenProvider can be used to supply a subject token to exchange for a
+// GCP access token.
+type SubjectTokenProvider interface {
+ // SubjectToken should return a valid subject token or an error.
+ // The external account token provider does not cache the returned subject
+ // token, so caching logic should be implemented in the provider to prevent
+ // multiple requests for the same subject token.
+ SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
+}
+
+// RequestOptions contains information about the requested subject token or AWS
+// security credentials from the Google external account credential.
+type RequestOptions struct {
+ // Audience is the requested audience for the external account credential.
+ Audience string
+ // Subject token type is the requested subject token type for the external
+ // account credential. Expected values include:
+ // “urn:ietf:params:oauth:token-type:jwt”
+ // “urn:ietf:params:oauth:token-type:id-token”
+ // “urn:ietf:params:oauth:token-type:saml2”
+ // “urn:ietf:params:aws:token-type:aws4_request”
+ SubjectTokenType string
+}
+
+// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
+// and an AWS Region to exchange for a GCP access token.
+type AwsSecurityCredentialsProvider interface {
+ // AwsRegion should return the AWS region or an error.
+ AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
+ // GetAwsSecurityCredentials should return a valid set of
+ // AwsSecurityCredentials or an error. The external account token provider
+ // does not cache the returned security credentials, so caching logic should
+ // be implemented in the provider to prevent multiple requests for the
+ // same security credentials.
+ AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
+}
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+ // AccessKeyId is the AWS Access Key ID - Required.
+ AccessKeyID string `json:"AccessKeyID"`
+ // SecretAccessKey is the AWS Secret Access Key - Required.
+ SecretAccessKey string `json:"SecretAccessKey"`
+ // SessionToken is the AWS Session token. This should be provided for
+ // temporary AWS security credentials - Optional.
+ SessionToken string `json:"Token"`
+}
+
+func (o *Options) validate() error {
+ if o.Audience == "" {
+ return fmt.Errorf("externalaccount: Audience must be set")
+ }
+ if o.SubjectTokenType == "" {
+ return fmt.Errorf("externalaccount: Subject token type must be set")
+ }
+ if o.WorkforcePoolUserProject != "" {
+ if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid {
+ return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials")
+ }
+ }
+ count := 0
+ if o.CredentialSource != nil {
+ count++
+ }
+ if o.SubjectTokenProvider != nil {
+ count++
+ }
+ if o.AwsSecurityCredentialsProvider != nil {
+ count++
+ }
+ if count == 0 {
+ return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
+ }
+ if count > 1 {
+ return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
+ }
+ return nil
+}
+
+// client returns the http client that should be used for the token exchange. If a non-default client
+// is provided, then the client configured in the options will always be returned. If a default client
+// is provided and the options are configured for X509 credentials, a new client will be created.
+func (o *Options) client() (*http.Client, error) {
+ // If a client was provided and no override certificate config location was provided, use the provided client.
+ if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") {
+ return o.Client, nil
+ }
+
+ // If a new client should be created, validate and use the certificate source to create a new mTLS client.
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return createX509Client(cert.CertificateConfigLocation)
+}
+
+// resolveTokenURL sets the default STS token endpoint with the configured
+// universe domain.
+func (o *Options) resolveTokenURL() {
+ if o.TokenURL != "" {
+ return
+ } else if o.UniverseDomain != "" {
+ o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1)
+ } else {
+ o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
+ }
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ opts.resolveTokenURL()
+ logger := internallog.New(opts.Logger)
+ stp, err := newSubjectTokenProvider(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := opts.client()
+ if err != nil {
+ return nil, err
+ }
+
+ tp := &tokenProvider{
+ client: client,
+ opts: opts,
+ stp: stp,
+ logger: logger,
+ }
+
+ if opts.ServiceAccountImpersonationURL == "" {
+ return auth.NewCachedTokenProvider(tp, nil), nil
+ }
+
+ scopes := make([]string, len(opts.Scopes))
+ copy(scopes, opts.Scopes)
+ // needed for impersonation
+ tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
+ imp, err := impersonate.NewTokenProvider(&impersonate.Options{
+ Client: client,
+ URL: opts.ServiceAccountImpersonationURL,
+ Scopes: scopes,
+ Tp: auth.NewCachedTokenProvider(tp, nil),
+ TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
+ Logger: logger,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return auth.NewCachedTokenProvider(imp, nil), nil
+}
+
+type subjectTokenProvider interface {
+ subjectToken(ctx context.Context) (string, error)
+ providerType() string
+}
+
+// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
+type tokenProvider struct {
+ client *http.Client
+ logger *slog.Logger
+ opts *Options
+ stp subjectTokenProvider
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+ subjectToken, err := tp.stp.subjectToken(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ stsRequest := &stsexchange.TokenRequest{
+ GrantType: stsexchange.GrantType,
+ Audience: tp.opts.Audience,
+ Scope: tp.opts.Scopes,
+ RequestedTokenType: stsexchange.TokenType,
+ SubjectToken: subjectToken,
+ SubjectTokenType: tp.opts.SubjectTokenType,
+ }
+ header := make(http.Header)
+ header.Set("Content-Type", "application/x-www-form-urlencoded")
+ header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
+ clientAuth := stsexchange.ClientAuthentication{
+ AuthStyle: auth.StyleInHeader,
+ ClientID: tp.opts.ClientID,
+ ClientSecret: tp.opts.ClientSecret,
+ }
+ var options map[string]interface{}
+ // Do not pass workforce_pool_user_project when client authentication is used.
+ // The client ID is sufficient for determining the user project.
+ if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
+ options = map[string]interface{}{
+ "userProject": tp.opts.WorkforcePoolUserProject,
+ }
+ }
+ stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
+ Client: tp.client,
+ Endpoint: tp.opts.TokenURL,
+ Request: stsRequest,
+ Authentication: clientAuth,
+ Headers: header,
+ ExtraOpts: options,
+ Logger: tp.logger,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ tok := &auth.Token{
+ Value: stsResp.AccessToken,
+ Type: stsResp.TokenType,
+ }
+ // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior.
+ if stsResp.ExpiresIn <= 0 {
+ return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
+ }
+ tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
+ return tok, nil
+}
+
+// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
+// subjectTokenProvider
+func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
+ logger := internallog.New(o.Logger)
+ reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
+ if o.AwsSecurityCredentialsProvider != nil {
+ return &awsSubjectProvider{
+ securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
+ TargetResource: o.Audience,
+ reqOpts: reqOpts,
+ logger: logger,
+ }, nil
+ } else if o.SubjectTokenProvider != nil {
+ return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
+ } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
+ if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
+ if awsVersion != 1 {
+ return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
+ }
+
+ awsProvider := &awsSubjectProvider{
+ EnvironmentID: o.CredentialSource.EnvironmentID,
+ RegionURL: o.CredentialSource.RegionURL,
+ RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
+ CredVerificationURL: o.CredentialSource.URL,
+ TargetResource: o.Audience,
+ Client: o.Client,
+ logger: logger,
+ }
+ if o.CredentialSource.IMDSv2SessionTokenURL != "" {
+ awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
+ }
+
+ return awsProvider, nil
+ }
+ } else if o.CredentialSource.File != "" {
+ return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
+ } else if o.CredentialSource.URL != "" {
+ return &urlSubjectProvider{
+ URL: o.CredentialSource.URL,
+ Headers: o.CredentialSource.Headers,
+ Format: o.CredentialSource.Format,
+ Client: o.Client,
+ Logger: logger,
+ }, nil
+ } else if o.CredentialSource.Executable != nil {
+ ec := o.CredentialSource.Executable
+ if ec.Command == "" {
+ return nil, errors.New("credentials: missing `command` field — executable command must be provided")
+ }
+
+ execProvider := &executableSubjectProvider{}
+ execProvider.Command = ec.Command
+ if ec.TimeoutMillis == 0 {
+ execProvider.Timeout = executableDefaultTimeout
+ } else {
+ execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond
+ if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum {
+ return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds())
+ }
+ }
+ execProvider.OutputFile = ec.OutputFile
+ execProvider.client = o.Client
+ execProvider.opts = o
+ execProvider.env = runtimeEnvironment{}
+ return execProvider, nil
+ } else if o.CredentialSource.Certificate != nil {
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return &x509Provider{}, nil
+ }
+ return nil, errors.New("credentials: unable to parse credential source")
+}
+
+func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string {
+ return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
+ goVersion(),
+ "unknown",
+ p.providerType(),
+ conf.ServiceAccountImpersonationURL != "",
+ conf.ServiceAccountImpersonationLifetimeSeconds != 0)
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
new file mode 100644
index 000000000..8186939fe
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
@@ -0,0 +1,78 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+)
+
+const (
+ fileProviderType = "file"
+)
+
+type fileSubjectProvider struct {
+ File string
+ Format *credsfile.Format
+}
+
+func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) {
+ tokenFile, err := os.Open(sp.File)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err)
+ }
+ defer tokenFile.Close()
+ tokenBytes, err := internal.ReadAll(tokenFile)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to read credential file: %w", err)
+ }
+ tokenBytes = bytes.TrimSpace(tokenBytes)
+
+ if sp.Format == nil {
+ return string(tokenBytes), nil
+ }
+ switch sp.Format.Type {
+ case fileTypeJSON:
+ jsonData := make(map[string]interface{})
+ err = json.Unmarshal(tokenBytes, &jsonData)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
+ }
+ val, ok := jsonData[sp.Format.SubjectTokenFieldName]
+ if !ok {
+ return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
+ }
+ token, ok := val.(string)
+ if !ok {
+ return "", errors.New("credentials: improperly formatted subject token")
+ }
+ return token, nil
+ case fileTypeText:
+ return string(tokenBytes), nil
+ default:
+ return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
+ }
+}
+
+func (sp *fileSubjectProvider) providerType() string {
+ return fileProviderType
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
new file mode 100644
index 000000000..8e4b4379b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
@@ -0,0 +1,74 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+var (
+ // version is a package internal global variable for testing purposes.
+ version = runtime.Version
+)
+
+// versionUnknown is only used when the runtime version cannot be determined.
+const versionUnknown = "UNKNOWN"
+
+// goVersion returns a Go runtime version derived from the runtime environment
+// that is modified to be suitable for reporting in a header, meaning it has no
+// whitespace. If it is unable to determine the Go runtime version, it returns
+// versionUnknown.
+func goVersion() string {
+ const develPrefix = "devel +"
+
+ s := version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+
+ notSemverRune := func(r rune) bool {
+ return !strings.ContainsRune("0123456789.", r)
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ // Some release candidates already have a dash in them.
+ if !strings.HasPrefix(prerelease, "-") {
+ prerelease = "-" + prerelease
+ }
+ s += prerelease
+ }
+ return s
+ }
+ return versionUnknown
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
new file mode 100644
index 000000000..be3c87351
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import "context"
+
+type programmaticProvider struct {
+ opts *RequestOptions
+ stp SubjectTokenProvider
+}
+
+func (pp *programmaticProvider) providerType() string {
+ return programmaticProviderType
+}
+
+func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) {
+ return pp.stp.SubjectToken(ctx, pp.opts)
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
new file mode 100644
index 000000000..754ecf4fe
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
@@ -0,0 +1,93 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ fileTypeText = "text"
+ fileTypeJSON = "json"
+ urlProviderType = "url"
+ programmaticProviderType = "programmatic"
+ x509ProviderType = "x509"
+)
+
+type urlSubjectProvider struct {
+ URL string
+ Headers map[string]string
+ Format *credsfile.Format
+ Client *http.Client
+ Logger *slog.Logger
+}
+
+func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil)
+ if err != nil {
+ return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err)
+ }
+
+ for key, val := range sp.Headers {
+ req.Header.Add(key, val)
+ }
+ sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
+ }
+ sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return "", fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+
+ if sp.Format == nil {
+ return string(body), nil
+ }
+ switch sp.Format.Type {
+ case "json":
+ jsonData := make(map[string]interface{})
+ err = json.Unmarshal(body, &jsonData)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
+ }
+ val, ok := jsonData[sp.Format.SubjectTokenFieldName]
+ if !ok {
+ return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
+ }
+ token, ok := val.(string)
+ if !ok {
+ return "", errors.New("credentials: improperly formatted subject token")
+ }
+ return token, nil
+ case fileTypeText:
+ return string(body), nil
+ default:
+ return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
+ }
+}
+
+func (sp *urlSubjectProvider) providerType() string {
+ return urlProviderType
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
new file mode 100644
index 000000000..115df5881
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
@@ -0,0 +1,63 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth/internal/transport/cert"
+)
+
+// x509Provider implements the subjectTokenProvider type for
+// x509 workload identity credentials. Because x509 credentials
+// rely on an mTLS connection to represent the 3rd party identity
+// rather than a subject token, this provider will always return
+// an empty string when a subject token is requested by the external account
+// token provider.
+type x509Provider struct {
+}
+
+func (xp *x509Provider) providerType() string {
+ return x509ProviderType
+}
+
+func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) {
+ return "", nil
+}
+
+// createX509Client creates a new client that is configured with mTLS, using the
+// certificate configuration specified in the credential source.
+func createX509Client(certificateConfigLocation string) (*http.Client, error) {
+ certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation)
+ if err != nil {
+ return nil, err
+ }
+ trans := http.DefaultTransport.(*http.Transport).Clone()
+
+ trans.TLSClientConfig = &tls.Config{
+ GetClientCertificate: certProvider,
+ }
+
+ // Create a client with default settings plus the X509 workload cert and key.
+ client := &http.Client{
+ Transport: trans,
+ Timeout: 30 * time.Second,
+ }
+
+ return client, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
new file mode 100644
index 000000000..ae39206e5
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
@@ -0,0 +1,115 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccountuser
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/stsexchange"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// Options stores the configuration for fetching tokens with external authorized
+// user credentials.
+type Options struct {
+ // Audience is the Secure Token Service (STS) audience which contains the
+ // resource name for the workforce pool and the provider identifier in that
+ // pool.
+ Audience string
+ // RefreshToken is the OAuth 2.0 refresh token.
+ RefreshToken string
+ // TokenURL is the STS token exchange endpoint for refresh.
+ TokenURL string
+ // TokenInfoURL is the STS endpoint URL for token introspection. Optional.
+ TokenInfoURL string
+ // ClientID is only required in conjunction with ClientSecret, as described
+ // below.
+ ClientID string
+ // ClientSecret is currently only required if token_info endpoint also needs
+ // to be called with the generated a cloud access token. When provided, STS
+ // will be called with additional basic authentication using client_id as
+ // username and client_secret as password.
+ ClientSecret string
+ // Scopes contains the desired scopes for the returned access token.
+ Scopes []string
+
+ // Client for token request.
+ Client *http.Client
+ // Logger for logging.
+ Logger *slog.Logger
+}
+
+func (c *Options) validate() bool {
+ return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if !opts.validate() {
+ return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
+ }
+
+ tp := &tokenProvider{
+ o: opts,
+ }
+ return auth.NewCachedTokenProvider(tp, nil), nil
+}
+
+type tokenProvider struct {
+ o *Options
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+ opts := tp.o
+
+ clientAuth := stsexchange.ClientAuthentication{
+ AuthStyle: auth.StyleInHeader,
+ ClientID: opts.ClientID,
+ ClientSecret: opts.ClientSecret,
+ }
+ headers := make(http.Header)
+ headers.Set("Content-Type", "application/x-www-form-urlencoded")
+ stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
+ Client: opts.Client,
+ Endpoint: opts.TokenURL,
+ RefreshToken: opts.RefreshToken,
+ Authentication: clientAuth,
+ Headers: headers,
+ Logger: internallog.New(tp.o.Logger),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if stsResponse.ExpiresIn < 0 {
+ return nil, errors.New("credentials: invalid expiry from security token service")
+ }
+
+ // guarded by the wrapping with CachedTokenProvider
+ if stsResponse.RefreshToken != "" {
+ opts.RefreshToken = stsResponse.RefreshToken
+ }
+ return &auth.Token{
+ Value: stsResponse.AccessToken,
+ Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
+ Type: internal.TokenTypeBearer,
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
new file mode 100644
index 000000000..c2d320fdf
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -0,0 +1,191 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gdch
+
+import (
+ "context"
+ "crypto"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // GrantType is the grant type for the token request.
+ GrantType = "urn:ietf:params:oauth:token-type:token-exchange"
+ requestTokenType = "urn:ietf:params:oauth:token-type:access_token"
+ subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount"
+)
+
+var (
+ gdchSupportFormatVersions map[string]bool = map[string]bool{
+ "1": true,
+ }
+)
+
+// Options for [NewTokenProvider].
+type Options struct {
+ STSAudience string
+ Client *http.Client
+ Logger *slog.Logger
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
+// GDCH cred file.
+func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) {
+ if !gdchSupportFormatVersions[f.FormatVersion] {
+ return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion)
+ }
+ if o.STSAudience == "" {
+ return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
+ if err != nil {
+ return nil, err
+ }
+ certPool, err := loadCertPool(f.CertPath)
+ if err != nil {
+ return nil, err
+ }
+
+ tp := gdchProvider{
+ serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
+ tokenURL: f.TokenURL,
+ aud: o.STSAudience,
+ signer: signer,
+ pkID: f.PrivateKeyID,
+ certPool: certPool,
+ client: o.Client,
+ logger: internallog.New(o.Logger),
+ }
+ return tp, nil
+}
+
+func loadCertPool(path string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+ pem, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read certificate: %w", err)
+ }
+ pool.AppendCertsFromPEM(pem)
+ return pool, nil
+}
+
+type gdchProvider struct {
+ serviceIdentity string
+ tokenURL string
+ aud string
+ signer crypto.Signer
+ pkID string
+ certPool *x509.CertPool
+
+ client *http.Client
+ logger *slog.Logger
+}
+
+func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
+ addCertToTransport(g.client, g.certPool)
+ iat := time.Now()
+ exp := iat.Add(time.Hour)
+ claims := jwt.Claims{
+ Iss: g.serviceIdentity,
+ Sub: g.serviceIdentity,
+ Aud: g.tokenURL,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ h := jwt.Header{
+ Algorithm: jwt.HeaderAlgRSA256,
+ Type: jwt.HeaderType,
+ KeyID: string(g.pkID),
+ }
+ payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", GrantType)
+ v.Set("audience", g.aud)
+ v.Set("requested_token_type", requestTokenType)
+ v.Set("subject_token", payload)
+ v.Set("subject_token_type", subjectTokenType)
+
+ req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ resp, body, err := internal.DoRequest(g.client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
+ return nil, &auth.Error{
+ Response: resp,
+ Body: body,
+ }
+ }
+
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ token := &auth.Token{
+ Value: tokenRes.AccessToken,
+ Type: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token.Metadata = raw
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ return token, nil
+}
+
+// addCertToTransport makes a best effort attempt at adding in the cert info to
+// the client. It tries to keep all configured transport settings if the
+// underlying transport is an http.Transport. Or else it overwrites the
+// transport with defaults adding in the certs.
+func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
+ trans, ok := hc.Transport.(*http.Transport)
+ if !ok {
+ trans = http.DefaultTransport.(*http.Transport).Clone()
+ }
+ trans.TLSClientConfig = &tls.Config{
+ RootCAs: certPool,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
new file mode 100644
index 000000000..705462c16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
@@ -0,0 +1,105 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
+)
+
+// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token].
+type IDTokenIAMOptions struct {
+ // Client is required.
+ Client *http.Client
+ // Logger is required.
+ Logger *slog.Logger
+ UniverseDomain auth.CredentialsPropertyProvider
+ ServiceAccountEmail string
+ GenerateIDTokenRequest
+}
+
+// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC.
+type GenerateIDTokenRequest struct {
+ Audience string `json:"audience"`
+ IncludeEmail bool `json:"includeEmail"`
+ // Delegates are the ordered, fully-qualified resource name for service
+ // accounts in a delegation chain. Each service account must be granted
+ // roles/iam.serviceAccountTokenCreator on the next service account in the
+ // chain. The delegates must have the following format:
+ // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard
+ // character is required; replacing it with a project ID is invalid.
+ // Optional.
+ Delegates []string `json:"delegates,omitempty"`
+}
+
+// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC.
+type GenerateIDTokenResponse struct {
+ Token string `json:"token"`
+}
+
+// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions].
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) {
+ universeDomain, err := o.UniverseDomain.GetProperty(ctx)
+ if err != nil {
+ return nil, err
+ }
+ endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
+ url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail))
+
+ bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
+ }
+ o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+ }
+
+ var tokenResp GenerateIDTokenResponse
+ if err := json.Unmarshal(body, &tokenResp); err != nil {
+ return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+ }
+ return &auth.Token{
+ Value: tokenResp.Token,
+ // Generated ID tokens are good for one hour.
+ Expiry: time.Now().Add(1 * time.Hour),
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
new file mode 100644
index 000000000..b3a99261f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -0,0 +1,156 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ defaultTokenLifetime = "3600s"
+ authHeaderKey = "Authorization"
+)
+
+// generateAccesstokenReq is used for service account impersonation
+type generateAccessTokenReq struct {
+ Delegates []string `json:"delegates,omitempty"`
+ Lifetime string `json:"lifetime,omitempty"`
+ Scope []string `json:"scope,omitempty"`
+}
+
+type impersonateTokenResponse struct {
+ AccessToken string `json:"accessToken"`
+ ExpireTime string `json:"expireTime"`
+}
+
+// NewTokenProvider uses a source credential, stored in Ts, to request an access token to the provided URL.
+// Scopes can be defined when the access token is requested.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ return opts, nil
+}
+
+// Options for [NewTokenProvider].
+type Options struct {
+ // Tp is the source credential used to generate a token on the
+ // impersonated service account. Required.
+ Tp auth.TokenProvider
+
+ // URL is the endpoint to call to generate a token
+ // on behalf of the service account. Required.
+ URL string
+ // Scopes that the impersonated credential should have. Required.
+ Scopes []string
+ // Delegates are the service account email addresses in a delegation chain.
+ // Each service account must be granted roles/iam.serviceAccountTokenCreator
+ // on the next service account in the chain. Optional.
+ Delegates []string
+ // TokenLifetimeSeconds is the number of seconds the impersonation token will
+ // be valid for. Defaults to 1 hour if unset. Optional.
+ TokenLifetimeSeconds int
+ // Client configures the underlying client used to make network requests
+ // when fetching tokens. Required.
+ Client *http.Client
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *Options) validate() error {
+ if o.Tp == nil {
+ return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials")
+ }
+ if o.URL == "" {
+ return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials")
+ }
+ return nil
+}
+
+// Token performs the exchange to get a temporary service account token to allow access to GCP.
+func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
+ logger := internallog.New(o.Logger)
+ lifetime := defaultTokenLifetime
+ if o.TokenLifetimeSeconds != 0 {
+ lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
+ }
+ reqBody := generateAccessTokenReq{
+ Lifetime: lifetime,
+ Scope: o.Scopes,
+ Delegates: o.Delegates,
+ }
+ b, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to marshal request: %w", err)
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ if err := setAuthHeader(ctx, o.Tp, req); err != nil {
+ return nil, err
+ }
+ logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
+ }
+ logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+
+ var accessTokenResp impersonateTokenResponse
+ if err := json.Unmarshal(body, &accessTokenResp); err != nil {
+ return nil, fmt.Errorf("credentials: unable to parse response: %w", err)
+ }
+ expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
+ }
+ return &auth.Token{
+ Value: accessTokenResp.AccessToken,
+ Expiry: expiry,
+ Type: internal.TokenTypeBearer,
+ }, nil
+}
+
+func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
+ t, err := tp.Token(ctx)
+ if err != nil {
+ return err
+ }
+ typ := t.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ r.Header.Set(authHeaderKey, typ+" "+t.Value)
+ return nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
new file mode 100644
index 000000000..e1d2b1503
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
@@ -0,0 +1,167 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stsexchange
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // GrantType for a sts exchange.
+ GrantType = "urn:ietf:params:oauth:grant-type:token-exchange"
+ // TokenType for a sts exchange.
+ TokenType = "urn:ietf:params:oauth:token-type:access_token"
+
+ jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
+)
+
+// Options stores the configuration for making an sts exchange request.
+type Options struct {
+ Client *http.Client
+ Logger *slog.Logger
+ Endpoint string
+ Request *TokenRequest
+ Authentication ClientAuthentication
+ Headers http.Header
+ // ExtraOpts are optional fields marshalled into the `options` field of the
+ // request body.
+ ExtraOpts map[string]interface{}
+ RefreshToken string
+}
+
+// RefreshAccessToken performs the token exchange using a refresh token flow.
+func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
+ data := url.Values{}
+ data.Set("grant_type", "refresh_token")
+ data.Set("refresh_token", opts.RefreshToken)
+ return doRequest(ctx, opts, data)
+}
+
+// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
+func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
+ data := url.Values{}
+ data.Set("audience", opts.Request.Audience)
+ data.Set("grant_type", GrantType)
+ data.Set("requested_token_type", TokenType)
+ data.Set("subject_token_type", opts.Request.SubjectTokenType)
+ data.Set("subject_token", opts.Request.SubjectToken)
+ data.Set("scope", strings.Join(opts.Request.Scope, " "))
+ if opts.ExtraOpts != nil {
+ opts, err := json.Marshal(opts.ExtraOpts)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err)
+ }
+ data.Set("options", string(opts))
+ }
+ return doRequest(ctx, opts, data)
+}
+
+func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
+ opts.Authentication.InjectAuthentication(data, opts.Headers)
+ encodedData := data.Encode()
+ logger := internallog.New(opts.Logger)
+
+ req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err)
+
+ }
+ for key, list := range opts.Headers {
+ for _, val := range list {
+ req.Header.Add(key, val)
+ }
+ }
+ req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
+
+ logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
+ resp, body, err := internal.DoRequest(opts.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
+ }
+ logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
+ return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+ var stsResp TokenResponse
+ if err := json.Unmarshal(body, &stsResp); err != nil {
+ return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err)
+ }
+
+ return &stsResp, nil
+}
+
+// TokenRequest contains fields necessary to make an oauth2 token
+// exchange.
+type TokenRequest struct {
+ ActingParty struct {
+ ActorToken string
+ ActorTokenType string
+ }
+ GrantType string
+ Resource string
+ Audience string
+ Scope []string
+ RequestedTokenType string
+ SubjectToken string
+ SubjectTokenType string
+}
+
+// TokenResponse is used to decode the remote server response during
+// an oauth2 token exchange.
+type TokenResponse struct {
+ AccessToken string `json:"access_token"`
+ IssuedTokenType string `json:"issued_token_type"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int `json:"expires_in"`
+ Scope string `json:"scope"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+// ClientAuthentication represents an OAuth client ID and secret and the
+// mechanism for passing these credentials as stated in rfc6749#2.3.1.
+type ClientAuthentication struct {
+ AuthStyle auth.Style
+ ClientID string
+ ClientSecret string
+}
+
+// InjectAuthentication is used to add authentication to a Secure Token Service
+// exchange request. It modifies either the passed url.Values or http.Header
+// depending on the desired authentication format.
+func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) {
+ if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil {
+ return
+ }
+ switch c.AuthStyle {
+ case auth.StyleInHeader:
+ plainHeader := c.ClientID + ":" + c.ClientSecret
+ headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader)))
+ default:
+ values.Set("client_id", c.ClientID)
+ values.Set("client_secret", c.ClientSecret)
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
new file mode 100644
index 000000000..8d335ccec
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -0,0 +1,89 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "crypto"
+ "errors"
+ "fmt"
+ "log/slog"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/jwt"
+)
+
+var (
+ // for testing
+ now func() time.Time = time.Now
+)
+
+// configureSelfSignedJWT uses the private key in the service account to create
+// a JWT without making a network call.
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not parse key: %w", err)
+ }
+ return &selfSignedTokenProvider{
+ email: f.ClientEmail,
+ audience: opts.Audience,
+ scopes: opts.scopes(),
+ signer: signer,
+ pkID: f.PrivateKeyID,
+ logger: opts.logger(),
+ }, nil
+}
+
+type selfSignedTokenProvider struct {
+ email string
+ audience string
+ scopes []string
+ signer crypto.Signer
+ pkID string
+ logger *slog.Logger
+}
+
+func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
+ iat := now()
+ exp := iat.Add(time.Hour)
+ scope := strings.Join(tp.scopes, " ")
+ c := &jwt.Claims{
+ Iss: tp.email,
+ Sub: tp.email,
+ Aud: tp.audience,
+ Scope: scope,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ h := &jwt.Header{
+ Algorithm: jwt.HeaderAlgRSA256,
+ Type: jwt.HeaderType,
+ KeyID: string(tp.pkID),
+ }
+ tok, err := jwt.EncodeJWS(h, c, tp.signer)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
+ }
+ tp.logger.Debug("created self-signed JWT", "token", tok)
+ return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go
new file mode 100644
index 000000000..e61360805
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go
@@ -0,0 +1,62 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package grpctransport
+
+import (
+ "context"
+ "net"
+ "syscall"
+
+ "google.golang.org/grpc"
+)
+
+const (
+ // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By
+ // default is 20 seconds.
+ tcpUserTimeoutMilliseconds = 20000
+
+ // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT.
+ tcpUserTimeoutOp = 0x12
+)
+
+func init() {
+ // timeoutDialerOption is a grpc.DialOption that contains dialer with
+ // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
+ timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout)
+}
+
+func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) {
+ control := func(network, address string, c syscall.RawConn) error {
+ var syscallErr error
+ controlErr := c.Control(func(fd uintptr) {
+ syscallErr = syscall.SetsockoptInt(
+ int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds)
+ })
+ if syscallErr != nil {
+ return syscallErr
+ }
+ if controlErr != nil {
+ return controlErr
+ }
+ return nil
+ }
+ d := &net.Dialer{
+ Control: control,
+ }
+ return d.DialContext(ctx, "tcp", addr)
+}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
new file mode 100644
index 000000000..d781c3e49
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -0,0 +1,126 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpctransport
+
+import (
+ "context"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal/compute"
+ "google.golang.org/grpc"
+ grpcgoogle "google.golang.org/grpc/credentials/google"
+)
+
+func isDirectPathEnabled(endpoint string, opts *Options) bool {
+ if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath {
+ return false
+ }
+ if !checkDirectPathEndPoint(endpoint) {
+ return false
+ }
+ if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b {
+ return false
+ }
+ return true
+}
+
+func checkDirectPathEndPoint(endpoint string) bool {
+ // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://").
+ // Also don't try direct path if the user has chosen an alternate name resolver
+ // (i.e., via ":///" prefix).
+ if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") {
+ return false
+ }
+
+ if endpoint == "" {
+ return false
+ }
+
+ return true
+}
+
+func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool {
+ if tp == nil {
+ return false
+ }
+ tok, err := tp.Token(context.Background())
+ if err != nil {
+ return false
+ }
+ if tok == nil {
+ return false
+ }
+ if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ return false
+ }
+ if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+ return true
+ }
+ if tok.MetadataString("auth.google.serviceAccount") != "default" {
+ return false
+ }
+ return true
+}
+
+func isDirectPathXdsUsed(o *Options) bool {
+ // Method 1: Enable DirectPath xDS by env;
+ if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b {
+ return true
+ }
+ // Method 2: Enable DirectPath xDS by option;
+ if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds {
+ return true
+ }
+ return false
+}
+
+// configureDirectPath returns some dial options and an endpoint to use if the
+// configuration allows the use of direct path. If it does not the provided
+// grpcOpts and endpoint are returned.
+func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
+ if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) {
+ // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
+ grpcOpts = []grpc.DialOption{
+ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
+ if timeoutDialerOption != nil {
+ grpcOpts = append(grpcOpts, timeoutDialerOption)
+ }
+ // Check if google-c2p resolver is enabled for DirectPath
+ if isDirectPathXdsUsed(opts) {
+ // google-c2p resolver target must not have a port number
+ if addr, _, err := net.SplitHostPort(endpoint); err == nil {
+ endpoint = "google-c2p:///" + addr
+ } else {
+ endpoint = "google-c2p:///" + endpoint
+ }
+ } else {
+ if !strings.HasPrefix(endpoint, "dns:///") {
+ endpoint = "dns:///" + endpoint
+ }
+ grpcOpts = append(grpcOpts,
+ // For now all DirectPath go clients will be using the following lb config, but in future
+ // when different services need different configs, then we should change this to a
+ // per-service config.
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
+ }
+ // TODO: add support for system parameters (quota project, request reason) via chained interceptor.
+ }
+ return grpcOpts, endpoint
+}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
new file mode 100644
index 000000000..4610a4855
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -0,0 +1,453 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package grpctransport provides functionality for managing gRPC client
+// connections to Google Cloud services.
+package grpctransport
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "sync"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport"
+ "github.com/googleapis/gax-go/v2/internallog"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "google.golang.org/grpc"
+ grpccreds "google.golang.org/grpc/credentials"
+ grpcinsecure "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/stats"
+)
+
+const (
+ // Check env to disable DirectPath traffic.
+ disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH"
+
+ // Check env to decide if using google-c2p resolver for DirectPath traffic.
+ enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
+
+ quotaProjectHeaderKey = "X-goog-user-project"
+)
+
+var (
+ // Set at init time by dial_socketopt.go. If nil, socketopt is not supported.
+ timeoutDialerOption grpc.DialOption
+)
+
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: When this module depends on a version of otelgrpc containing the fix,
+// replace this singleton with inline usage for simplicity.
+// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
+// ClientCertProvider is a function that returns a TLS client certificate to be
+// used when opening TLS connections. It follows the same semantics as
+// [crypto/tls.Config.GetClientCertificate].
+type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// Options used to configure a [GRPCClientConnPool] from [Dial].
+type Options struct {
+ // DisableTelemetry disables default telemetry (OpenTelemetry). An example
+ // reason to do so would be to bind custom telemetry that overrides the
+ // defaults.
+ DisableTelemetry bool
+ // DisableAuthentication specifies that no authentication should be used. It
+ // is suitable only for testing and for accessing public resources, like
+ // public Google Cloud Storage buckets.
+ DisableAuthentication bool
+ // Endpoint overrides the default endpoint to be used for a service.
+ Endpoint string
+ // Metadata is extra gRPC metadata that will be appended to every outgoing
+ // request.
+ Metadata map[string]string
+ // GRPCDialOpts are dial options that will be passed to `grpc.Dial` when
+ // establishing a`grpc.Conn``
+ GRPCDialOpts []grpc.DialOption
+ // PoolSize is specifies how many connections to balance between when making
+ // requests. If unset or less than 1, the value defaults to 1.
+ PoolSize int
+ // Credentials used to add Authorization metadata to all requests. If set
+ // DetectOpts are ignored.
+ Credentials *auth.Credentials
+ // ClientCertProvider is a function that returns a TLS client certificate to
+ // be used when opening TLS connections. It follows the same semantics as
+ // crypto/tls.Config.GetClientCertificate.
+ ClientCertProvider ClientCertProvider
+ // DetectOpts configures settings for detect Application Default
+ // Credentials.
+ DetectOpts *credentials.DetectOptions
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // The default value is "googleapis.com". This is the universe domain
+ // configured for the client, which will be compared to the universe domain
+ // that is separately configured for the credentials.
+ UniverseDomain string
+ // APIKey specifies an API key to be used as the basis for authentication.
+ // If set DetectOpts are ignored.
+ APIKey string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+
+ // InternalOptions are NOT meant to be set directly by consumers of this
+ // package, they should only be set by generated client code.
+ InternalOptions *InternalOptions
+}
+
+// client returns the client a user set for the detect options or nil if one was
+// not set.
+func (o *Options) client() *http.Client {
+ if o.DetectOpts != nil && o.DetectOpts.Client != nil {
+ return o.DetectOpts.Client
+ }
+ return nil
+}
+
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+func (o *Options) validate() error {
+ if o == nil {
+ return errors.New("grpctransport: opts required to be non-nil")
+ }
+ if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
+ return nil
+ }
+ hasCreds := o.APIKey != "" ||
+ o.Credentials != nil ||
+ (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
+ (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
+ if o.DisableAuthentication && hasCreds {
+ return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials")
+ }
+ return nil
+}
+
+func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
+ io := o.InternalOptions
+ // soft-clone these so we are not updating a ref the user holds and may reuse
+ do := transport.CloneDetectOptions(o.DetectOpts)
+
+ // If scoped JWTs are enabled user provided an aud, allow self-signed JWT.
+ if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
+ do.UseSelfSignedJWT = true
+ }
+ // Only default scopes if user did not also set an audience.
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
+ do.Scopes = make([]string, len(io.DefaultScopes))
+ copy(do.Scopes, io.DefaultScopes)
+ }
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
+ do.Audience = o.InternalOptions.DefaultAudience
+ }
+ if o.ClientCertProvider != nil {
+ tlsConfig := &tls.Config{
+ GetClientCertificate: o.ClientCertProvider,
+ }
+ do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
+ do.TokenURL = credentials.GoogleMTLSTokenURL
+ }
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
+ return do
+}
+
+// InternalOptions are only meant to be set by generated client code. These are
+// not meant to be set directly by consumers of this package. Configuration in
+// this type is considered EXPERIMENTAL and may be removed at any time in the
+// future without warning.
+type InternalOptions struct {
+ // EnableNonDefaultSAForDirectPath overrides the default requirement for
+ // using the default service account for DirectPath.
+ EnableNonDefaultSAForDirectPath bool
+ // EnableDirectPath overrides the default attempt to use DirectPath.
+ EnableDirectPath bool
+ // EnableDirectPathXds overrides the default DirectPath type. It is only
+ // valid when DirectPath is enabled.
+ EnableDirectPathXds bool
+ // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+ EnableJWTWithScope bool
+ // AllowHardBoundTokens allows libraries to request a hard-bound token.
+ // Obtaining hard-bound tokens requires the connection to be established
+ // using either ALTS or mTLS with S2A.
+ AllowHardBoundTokens []string
+ // DefaultAudience specifies a default audience to be used as the audience
+ // field ("aud") for the JWT token authentication.
+ DefaultAudience string
+ // DefaultEndpointTemplate combined with UniverseDomain specifies
+ // the default endpoint.
+ DefaultEndpointTemplate string
+ // DefaultMTLSEndpoint specifies the default mTLS endpoint.
+ DefaultMTLSEndpoint string
+ // DefaultScopes specifies the default OAuth2 scopes to be used for a
+ // service.
+ DefaultScopes []string
+ // SkipValidation bypasses validation on Options. It should only be used
+ // internally for clients that needs more control over their transport.
+ SkipValidation bool
+}
+
+// Dial returns a GRPCClientConnPool that can be used to communicate with a
+// Google cloud service, configured with the provided [Options]. It
+// automatically appends Authorization metadata to all outgoing requests.
+func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ if opts.PoolSize <= 1 {
+ conn, err := dial(ctx, secure, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &singleConnPool{conn}, nil
+ }
+ pool := &roundRobinConnPool{}
+ for i := 0; i < opts.PoolSize; i++ {
+ conn, err := dial(ctx, secure, opts)
+ if err != nil {
+ // ignore close error, if any
+ defer pool.Close()
+ return nil, err
+ }
+ pool.conns = append(pool.conns, conn)
+ }
+ return pool, nil
+}
+
+// return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1
+func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) {
+ tOpts := &transport.Options{
+ Endpoint: opts.Endpoint,
+ ClientCertProvider: opts.ClientCertProvider,
+ Client: opts.client(),
+ UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
+ }
+ if io := opts.InternalOptions; io != nil {
+ tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
+ tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+ tOpts.EnableDirectPath = io.EnableDirectPath
+ tOpts.EnableDirectPathXds = io.EnableDirectPathXds
+ }
+ transportCreds, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ if !secure {
+ transportCreds.TransportCredentials = grpcinsecure.NewCredentials()
+ }
+
+ // Initialize gRPC dial options with transport-level security options.
+ grpcOpts := []grpc.DialOption{
+ grpc.WithTransportCredentials(transportCreds),
+ }
+
+ // Ensure the token exchange HTTP transport uses the same ClientCertProvider as the GRPC API transport.
+ opts.ClientCertProvider, err = transport.GetClientCertificateProvider(tOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.APIKey != "" {
+ grpcOpts = append(grpcOpts,
+ grpc.WithPerRPCCredentials(&grpcKeyProvider{
+ apiKey: opts.APIKey,
+ metadata: opts.Metadata,
+ secure: secure,
+ }),
+ )
+ } else if !opts.DisableAuthentication {
+ metadata := opts.Metadata
+
+ var creds *auth.Credentials
+ if opts.Credentials != nil {
+ creds = opts.Credentials
+ } else {
+ // This condition is only met for non-DirectPath clients because
+ // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath
+ // is false.
+ if transportCreds.TransportType == transport.TransportTypeMTLSS2A {
+ // Check that the client allows requesting hard-bound token for the transport type mTLS using S2A.
+ for _, ev := range opts.InternalOptions.AllowHardBoundTokens {
+ if ev == "MTLS_S2A" {
+ opts.DetectOpts.TokenBindingType = credentials.MTLSHardBinding
+ break
+ }
+ }
+ }
+ var err error
+ creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ qp, err := creds.QuotaProjectID(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if qp != "" {
+ if metadata == nil {
+ metadata = make(map[string]string, 1)
+ }
+ // Don't overwrite user specified quota
+ if _, ok := metadata[quotaProjectHeaderKey]; !ok {
+ metadata[quotaProjectHeaderKey] = qp
+ }
+ }
+ grpcOpts = append(grpcOpts,
+ grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
+ creds: creds,
+ metadata: metadata,
+ clientUniverseDomain: opts.UniverseDomain,
+ }),
+ )
+ // Attempt Direct Path
+ grpcOpts, transportCreds.Endpoint = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds)
+ }
+
+ // Add tracing, but before the other options, so that clients can override the
+ // gRPC stats handler.
+ // This assumes that gRPC options are processed in order, left to right.
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
+ grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
+
+ return grpc.Dial(transportCreds.Endpoint, grpcOpts...)
+}
+
+// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
+type grpcKeyProvider struct {
+ apiKey string
+ metadata map[string]string
+ secure bool
+}
+
+func (g *grpcKeyProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ metadata := make(map[string]string, len(g.metadata)+1)
+ metadata["X-goog-api-key"] = g.apiKey
+ for k, v := range g.metadata {
+ metadata[k] = v
+ }
+ return metadata, nil
+}
+
+func (g *grpcKeyProvider) RequireTransportSecurity() bool {
+ return g.secure
+}
+
+// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
+type grpcCredentialsProvider struct {
+ creds *auth.Credentials
+
+ secure bool
+
+ // Additional metadata attached as headers.
+ metadata map[string]string
+ clientUniverseDomain string
+}
+
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
+func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
+ if c.clientUniverseDomain != "" {
+ return c.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
+ }
+ return internal.DefaultUniverseDomain
+}
+
+func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ token, err := c.creds.Token(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+ return nil, err
+ }
+ }
+ if c.secure {
+ ri, _ := grpccreds.RequestInfoFromContext(ctx)
+ if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {
+ return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err)
+ }
+ }
+ metadata := make(map[string]string, len(c.metadata)+1)
+ setAuthMetadata(token, metadata)
+ for k, v := range c.metadata {
+ metadata[k] = v
+ }
+ return metadata, nil
+}
+
+// setAuthMetadata uses the provided token to set the Authorization metadata.
+// If the token.Type is empty, the type is assumed to be Bearer.
+func setAuthMetadata(token *auth.Token, m map[string]string) {
+ typ := token.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ m["authorization"] = typ + " " + token.Value
+}
+
+func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
+ return c.secure
+}
+
+func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+ if opts.DisableTelemetry {
+ return dialOpts
+ }
+ return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
+}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/pool.go b/vendor/cloud.google.com/go/auth/grpctransport/pool.go
new file mode 100644
index 000000000..642679f9b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/grpctransport/pool.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpctransport
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+
+ "google.golang.org/grpc"
+)
+
+// GRPCClientConnPool is an interface that satisfies
+// [google.golang.org/grpc.ClientConnInterface] and has some utility functions
+// that are needed for connection lifecycle when using in a client library. It
+// may be a pool or a single connection. This interface is not intended to, and
+// can't be, implemented by others.
+type GRPCClientConnPool interface {
+ // Connection returns a [google.golang.org/grpc.ClientConn] from the pool.
+ //
+ // ClientConn aren't returned to the pool and should not be closed directly.
+ Connection() *grpc.ClientConn
+
+ // Len returns the number of connections in the pool. It will always return
+ // the same value.
+ Len() int
+
+ // Close closes every ClientConn in the pool. The error returned by Close
+ // may be a single error or multiple errors.
+ Close() error
+
+ grpc.ClientConnInterface
+
+ // private ensure others outside this package can't implement this type
+ private()
+}
+
+// singleConnPool is a special case for a single connection.
+type singleConnPool struct {
+ *grpc.ClientConn
+}
+
+func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn }
+func (p *singleConnPool) Len() int { return 1 }
+func (p *singleConnPool) private() {}
+
+type roundRobinConnPool struct {
+ conns []*grpc.ClientConn
+
+ idx uint32 // access via sync/atomic
+}
+
+func (p *roundRobinConnPool) Len() int {
+ return len(p.conns)
+}
+
+func (p *roundRobinConnPool) Connection() *grpc.ClientConn {
+ i := atomic.AddUint32(&p.idx, 1)
+ return p.conns[i%uint32(len(p.conns))]
+}
+
+func (p *roundRobinConnPool) Close() error {
+ var errs multiError
+ for _, conn := range p.conns {
+ if err := conn.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) == 0 {
+ return nil
+ }
+ return errs
+}
+
+func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
+ return p.Connection().Invoke(ctx, method, args, reply, opts...)
+}
+
+func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ return p.Connection().NewStream(ctx, desc, method, opts...)
+}
+
+func (p *roundRobinConnPool) private() {}
+
+// multiError represents errors from multiple conns in the group.
+type multiError []error
+
+func (m multiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
new file mode 100644
index 000000000..5758e85b5
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -0,0 +1,247 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httptransport provides functionality for managing HTTP client
+// connections to Google Cloud services.
+package httptransport
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "cloud.google.com/go/auth"
+ detect "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// ClientCertProvider is a function that returns a TLS client certificate to be
+// used when opening TLS connections. It follows the same semantics as
+// [crypto/tls.Config.GetClientCertificate].
+type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// Options used to configure a [net/http.Client] from [NewClient].
+type Options struct {
+ // DisableTelemetry disables default telemetry (OpenTelemetry). An example
+ // reason to do so would be to bind custom telemetry that overrides the
+ // defaults.
+ DisableTelemetry bool
+ // DisableAuthentication specifies that no authentication should be used. It
+ // is suitable only for testing and for accessing public resources, like
+ // public Google Cloud Storage buckets.
+ DisableAuthentication bool
+ // Headers are extra HTTP headers that will be appended to every outgoing
+ // request.
+ Headers http.Header
+ // BaseRoundTripper overrides the base transport used for serving requests.
+ // If specified ClientCertProvider is ignored.
+ BaseRoundTripper http.RoundTripper
+ // Endpoint overrides the default endpoint to be used for a service.
+ Endpoint string
+ // APIKey specifies an API key to be used as the basis for authentication.
+ // If set DetectOpts are ignored.
+ APIKey string
+ // Credentials used to add Authorization header to all requests. If set
+ // DetectOpts are ignored.
+ Credentials *auth.Credentials
+ // ClientCertProvider is a function that returns a TLS client certificate to
+ // be used when opening TLS connections. It follows the same semantics as
+ // crypto/tls.Config.GetClientCertificate.
+ ClientCertProvider ClientCertProvider
+ // DetectOpts configures settings for detect Application Default
+ // Credentials.
+ DetectOpts *detect.DetectOptions
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // The default value is "googleapis.com". This is the universe domain
+ // configured for the client, which will be compared to the universe domain
+ // that is separately configured for the credentials.
+ UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+
+ // InternalOptions are NOT meant to be set directly by consumers of this
+ // package, they should only be set by generated client code.
+ InternalOptions *InternalOptions
+}
+
+func (o *Options) validate() error {
+ if o == nil {
+ return errors.New("httptransport: opts required to be non-nil")
+ }
+ if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
+ return nil
+ }
+ hasCreds := o.APIKey != "" ||
+ o.Credentials != nil ||
+ (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
+ (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
+ if o.DisableAuthentication && hasCreds {
+ return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
+ }
+ return nil
+}
+
+// client returns the client a user set for the detect options or nil if one was
+// not set.
+func (o *Options) client() *http.Client {
+ if o.DetectOpts != nil && o.DetectOpts.Client != nil {
+ return o.DetectOpts.Client
+ }
+ return nil
+}
+
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+func (o *Options) resolveDetectOptions() *detect.DetectOptions {
+ io := o.InternalOptions
+ // soft-clone these so we are not updating a ref the user holds and may reuse
+ do := transport.CloneDetectOptions(o.DetectOpts)
+
+ // If scoped JWTs are enabled user provided an aud, allow self-signed JWT.
+ if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
+ do.UseSelfSignedJWT = true
+ }
+ // Only default scopes if user did not also set an audience.
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
+ do.Scopes = make([]string, len(io.DefaultScopes))
+ copy(do.Scopes, io.DefaultScopes)
+ }
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
+ do.Audience = o.InternalOptions.DefaultAudience
+ }
+ if o.ClientCertProvider != nil {
+ tlsConfig := &tls.Config{
+ GetClientCertificate: o.ClientCertProvider,
+ }
+ do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
+ do.TokenURL = detect.GoogleMTLSTokenURL
+ }
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
+ return do
+}
+
+// InternalOptions are only meant to be set by generated client code. These are
+// not meant to be set directly by consumers of this package. Configuration in
+// this type is considered EXPERIMENTAL and may be removed at any time in the
+// future without warning.
+type InternalOptions struct {
+ // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+ EnableJWTWithScope bool
+ // DefaultAudience specifies a default audience to be used as the audience
+ // field ("aud") for the JWT token authentication.
+ DefaultAudience string
+ // DefaultEndpointTemplate combined with UniverseDomain specifies the
+ // default endpoint.
+ DefaultEndpointTemplate string
+ // DefaultMTLSEndpoint specifies the default mTLS endpoint.
+ DefaultMTLSEndpoint string
+ // DefaultScopes specifies the default OAuth2 scopes to be used for a
+ // service.
+ DefaultScopes []string
+ // SkipValidation bypasses validation on Options. It should only be used
+ // internally for clients that need more control over their transport.
+ SkipValidation bool
+ // SkipUniverseDomainValidation skips the verification that the universe
+ // domain configured for the client matches the universe domain configured
+ // for the credentials. It should only be used internally for clients that
+ // need more control over their transport. The default is false.
+ SkipUniverseDomainValidation bool
+}
+
+// AddAuthorizationMiddleware adds a middleware to the provided client's
+// transport that sets the Authorization header with the value produced by the
+// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
+// if client or creds is nil.
+//
+// This function does not support setting a universe domain value on the client.
+func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
+ if client == nil || creds == nil {
+ return fmt.Errorf("httptransport: client and tp must not be nil")
+ }
+ base := client.Transport
+ if base == nil {
+ if dt, ok := http.DefaultTransport.(*http.Transport); ok {
+ base = dt.Clone()
+ } else {
+ // Directly reuse the DefaultTransport if the application has
+ // replaced it with an implementation of RoundTripper other than
+ // http.Transport.
+ base = http.DefaultTransport
+ }
+ }
+ client.Transport = &authTransport{
+ creds: creds,
+ base: base,
+ }
+ return nil
+}
+
+// NewClient returns a [net/http.Client] that can be used to communicate with a
+// Google cloud service, configured with the provided [Options]. It
+// automatically appends Authorization headers to all outgoing requests.
+func NewClient(opts *Options) (*http.Client, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+
+ tOpts := &transport.Options{
+ Endpoint: opts.Endpoint,
+ ClientCertProvider: opts.ClientCertProvider,
+ Client: opts.client(),
+ UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
+ }
+ if io := opts.InternalOptions; io != nil {
+ tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
+ tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+ }
+ clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
+ if err != nil {
+ return nil, err
+ }
+ baseRoundTripper := opts.BaseRoundTripper
+ if baseRoundTripper == nil {
+ baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
+ }
+ // Ensure the token exchange transport uses the same ClientCertProvider as the API transport.
+ opts.ClientCertProvider = clientCertProvider
+ trans, err := newTransport(baseRoundTripper, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &http.Client{
+ Transport: trans,
+ }, nil
+}
+
+// SetAuthHeader uses the provided token to set the Authorization header on a
+// request. If the token.Type is empty, the type is assumed to be Bearer.
+func SetAuthHeader(token *auth.Token, req *http.Request) {
+ typ := token.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ req.Header.Set("Authorization", typ+" "+token.Value)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
new file mode 100644
index 000000000..ee215b6dc
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -0,0 +1,234 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "os"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport"
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+ "golang.org/x/net/http2"
+)
+
+const (
+ quotaProjectHeaderKey = "X-goog-user-project"
+)
+
+func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
+ var headers = opts.Headers
+ ht := &headerTransport{
+ base: base,
+ headers: headers,
+ }
+ var trans http.RoundTripper = ht
+ trans = addOpenTelemetryTransport(trans, opts)
+ switch {
+ case opts.DisableAuthentication:
+ // Do nothing.
+ case opts.APIKey != "":
+ qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
+ if qp != "" {
+ if headers == nil {
+ headers = make(map[string][]string, 1)
+ }
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ trans = &apiKeyTransport{
+ Transport: trans,
+ Key: opts.APIKey,
+ }
+ default:
+ var creds *auth.Credentials
+ if opts.Credentials != nil {
+ creds = opts.Credentials
+ } else {
+ var err error
+ creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+ if err != nil {
+ return nil, err
+ }
+ }
+ qp, err := creds.QuotaProjectID(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ if qp != "" {
+ if headers == nil {
+ headers = make(map[string][]string, 1)
+ }
+ // Don't overwrite user specified quota
+ if v := headers.Get(quotaProjectHeaderKey); v == "" {
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ }
+ var skipUD bool
+ if iOpts := opts.InternalOptions; iOpts != nil {
+ skipUD = iOpts.SkipUniverseDomainValidation
+ }
+ creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
+ trans = &authTransport{
+ base: trans,
+ creds: creds,
+ clientUniverseDomain: opts.UniverseDomain,
+ skipUniverseDomainValidation: skipUD,
+ }
+ }
+ return trans, nil
+}
+
+// defaultBaseTransport returns the base HTTP transport.
+// On App Engine, this is urlfetch.Transport.
+// Otherwise, use a default transport, taking most defaults from
+// http.DefaultTransport.
+// If TLSCertificate is available, set TLSClientConfig as well.
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
+ defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+ if !ok {
+ defaultTransport = transport.BaseTransport()
+ }
+ trans := defaultTransport.Clone()
+ trans.MaxIdleConnsPerHost = 100
+
+ if clientCertSource != nil {
+ trans.TLSClientConfig = &tls.Config{
+ GetClientCertificate: clientCertSource,
+ }
+ }
+ if dialTLSContext != nil {
+ // If DialTLSContext is set, TLSClientConfig wil be ignored
+ trans.DialTLSContext = dialTLSContext
+ }
+
+ // Configures the ReadIdleTimeout HTTP/2 option for the
+ // transport. This allows broken idle connections to be pruned more quickly,
+ // preventing the client from attempting to re-use connections that will no
+ // longer work.
+ http2Trans, err := http2.ConfigureTransports(trans)
+ if err == nil {
+ http2Trans.ReadIdleTimeout = time.Second * 31
+ }
+
+ return trans
+}
+
+type apiKeyTransport struct {
+ // Key is the API Key to set on requests.
+ Key string
+ // Transport is the underlying HTTP transport.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+}
+
+func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ newReq := *req
+ args := newReq.URL.Query()
+ args.Set("key", t.Key)
+ newReq.URL.RawQuery = args.Encode()
+ return t.Transport.RoundTrip(&newReq)
+}
+
+type headerTransport struct {
+ headers http.Header
+ base http.RoundTripper
+}
+
+func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.base
+ newReq := *req
+ newReq.Header = make(http.Header)
+ for k, vv := range req.Header {
+ newReq.Header[k] = vv
+ }
+
+ for k, v := range t.headers {
+ newReq.Header[k] = v
+ }
+
+ return rt.RoundTrip(&newReq)
+}
+
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+ if opts.DisableTelemetry {
+ return trans
+ }
+ return otelhttp.NewTransport(trans)
+}
+
+type authTransport struct {
+ creds *auth.Credentials
+ base http.RoundTripper
+ clientUniverseDomain string
+ skipUniverseDomainValidation bool
+}
+
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
+func (t *authTransport) getClientUniverseDomain() string {
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
+ }
+ return internal.DefaultUniverseDomain
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source. Per the RoundTripper contract we must
+// not modify the initial request, so we clone it, and we must close the body
+// on any errors that happens during our token logic.
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+ token, err := t.creds.Token(req.Context())
+ if err != nil {
+ return nil, err
+ }
+ if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
+ if err != nil {
+ return nil, err
+ }
+ if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+ return nil, err
+ }
+ }
+ req2 := req.Clone(req.Context())
+ SetAuthHeader(token, req2)
+ reqBodyClosed = true
+ return t.base.RoundTrip(req2)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
new file mode 100644
index 000000000..05c7e8bdd
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+)
+
+// OnComputeEngine returns whether the client is running on GCE.
+//
+// This is a copy of the gRPC internal googlecloud.OnGCE() func at:
+// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
+// The functionality is similar to the metadata.OnGCE() func at:
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go
+// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server.
+// In particular, OnComputeEngine() will return false on Serverless.
+func OnComputeEngine() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+ name := string(manufacturer)
+ switch goos {
+ case "linux":
+ name = strings.TrimSpace(name)
+ return name == "Google" || name == "Google Compute Engine"
+ case "windows":
+ name = strings.Replace(name, " ", "", -1)
+ name = strings.Replace(name, "\n", "", -1)
+ name = strings.Replace(name, "\r", "", -1)
+ return name == "Google"
+ default:
+ return false
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
new file mode 100644
index 000000000..af490bf4f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
@@ -0,0 +1,22 @@
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+func manufacturer() ([]byte, error) {
+ return nil, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
new file mode 100644
index 000000000..d92178df8
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import "os"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+ return os.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 000000000..16be9df30
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "errors"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ windowsCheckCommand = "powershell.exe"
+ windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+ powershellOutputFilter = "Manufacturer"
+ windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+ cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+ out, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+ if strings.HasPrefix(line, powershellOutputFilter) {
+ re := regexp.MustCompile(windowsManufacturerRegex)
+ name := re.FindString(line)
+ name = strings.TrimLeft(name, ":")
+ return []byte(name), nil
+ }
+ }
+ return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
new file mode 100644
index 000000000..9cd4bed61
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credsfile is meant to hide implementation details from the pubic
+// surface of the detect package. It should not import any other packages in
+// this module. It is located under the main internal package so other
+// sub-packages can use these parsed types as well.
+package credsfile
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+)
+
+const (
+ // GoogleAppCredsEnvVar is the environment variable for setting the
+ // application default credentials.
+ GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ userCredsFilename = "application_default_credentials.json"
+)
+
+// CredentialType represents different credential filetypes Google credentials
+// can be.
+type CredentialType int
+
+const (
+ // UnknownCredType is an unidentified file type.
+ UnknownCredType CredentialType = iota
+ // UserCredentialsKey represents a user creds file type.
+ UserCredentialsKey
+ // ServiceAccountKey represents a service account file type.
+ ServiceAccountKey
+ // ImpersonatedServiceAccountKey represents a impersonated service account
+ // file type.
+ ImpersonatedServiceAccountKey
+ // ExternalAccountKey represents a external account file type.
+ ExternalAccountKey
+ // GDCHServiceAccountKey represents a GDCH file type.
+ GDCHServiceAccountKey
+ // ExternalAccountAuthorizedUserKey represents a external account authorized
+ // user file type.
+ ExternalAccountAuthorizedUserKey
+)
+
+// parseCredentialType returns the associated filetype based on the parsed
+// typeString provided.
+func parseCredentialType(typeString string) CredentialType {
+ switch typeString {
+ case "service_account":
+ return ServiceAccountKey
+ case "authorized_user":
+ return UserCredentialsKey
+ case "impersonated_service_account":
+ return ImpersonatedServiceAccountKey
+ case "external_account":
+ return ExternalAccountKey
+ case "external_account_authorized_user":
+ return ExternalAccountAuthorizedUserKey
+ case "gdch_service_account":
+ return GDCHServiceAccountKey
+ default:
+ return UnknownCredType
+ }
+}
+
+// GetFileNameFromEnv returns the override if provided or detects a filename
+// from the environment.
+func GetFileNameFromEnv(override string) string {
+ if override != "" {
+ return override
+ }
+ return os.Getenv(GoogleAppCredsEnvVar)
+}
+
+// GetWellKnownFileName tries to locate the filepath for the user credential
+// file based on the environment.
+func GetWellKnownFileName() string {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
+}
+
+// guessUnixHomeDir default to checking for HOME, but not all unix systems have
+// this set, do have a fallback.
+func guessUnixHomeDir() string {
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
new file mode 100644
index 000000000..3be6e5bbb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
@@ -0,0 +1,157 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+ "encoding/json"
+)
+
+// Config3LO is the internals of a client creds file.
+type Config3LO struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+}
+
+// ClientCredentialsFile representation.
+type ClientCredentialsFile struct {
+ Web *Config3LO `json:"web"`
+ Installed *Config3LO `json:"installed"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ServiceAccountFile representation.
+type ServiceAccountFile struct {
+ Type string `json:"type"`
+ ProjectID string `json:"project_id"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ ClientEmail string `json:"client_email"`
+ ClientID string `json:"client_id"`
+ AuthURL string `json:"auth_uri"`
+ TokenURL string `json:"token_uri"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// UserCredentialsFile representation.
+type UserCredentialsFile struct {
+ Type string `json:"type"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ QuotaProjectID string `json:"quota_project_id"`
+ RefreshToken string `json:"refresh_token"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ExternalAccountFile representation.
+type ExternalAccountFile struct {
+ Type string `json:"type"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ Audience string `json:"audience"`
+ SubjectTokenType string `json:"subject_token_type"`
+ ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
+ TokenURL string `json:"token_url"`
+ CredentialSource *CredentialSource `json:"credential_source,omitempty"`
+ TokenInfoURL string `json:"token_info_url"`
+ ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
+ QuotaProjectID string `json:"quota_project_id"`
+ WorkforcePoolUserProject string `json:"workforce_pool_user_project"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ExternalAccountAuthorizedUserFile representation.
+type ExternalAccountAuthorizedUserFile struct {
+ Type string `json:"type"`
+ Audience string `json:"audience"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+ TokenURL string `json:"token_url"`
+ TokenInfoURL string `json:"token_info_url"`
+ RevokeURL string `json:"revoke_url"`
+ QuotaProjectID string `json:"quota_project_id"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
+//
+// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question.
+// The EnvironmentID should start with AWS if being used for an AWS credential.
+type CredentialSource struct {
+ File string `json:"file"`
+ URL string `json:"url"`
+ Headers map[string]string `json:"headers"`
+ Executable *ExecutableConfig `json:"executable,omitempty"`
+ Certificate *CertificateConfig `json:"certificate"`
+ EnvironmentID string `json:"environment_id"` // TODO: Make type for this
+ RegionURL string `json:"region_url"`
+ RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
+ CredVerificationURL string `json:"cred_verification_url"`
+ IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
+ Format *Format `json:"format,omitempty"`
+}
+
+// Format describes the format of a [CredentialSource].
+type Format struct {
+ // Type is either "text" or "json". When not provided "text" type is assumed.
+ Type string `json:"type"`
+ // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure.
+ SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// ExecutableConfig represents the command to run for an executable
+// [CredentialSource].
+type ExecutableConfig struct {
+ Command string `json:"command"`
+ TimeoutMillis int `json:"timeout_millis"`
+ OutputFile string `json:"output_file"`
+}
+
+// CertificateConfig represents the options used to set up X509 based workload
+// [CredentialSource]
+type CertificateConfig struct {
+ UseDefaultCertificateConfig bool `json:"use_default_certificate_config"`
+ CertificateConfigLocation string `json:"certificate_config_location"`
+}
+
+// ServiceAccountImpersonationInfo has impersonation configuration.
+type ServiceAccountImpersonationInfo struct {
+ TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
+}
+
+// ImpersonatedServiceAccountFile representation.
+type ImpersonatedServiceAccountFile struct {
+ Type string `json:"type"`
+ ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
+ Delegates []string `json:"delegates"`
+ CredSource json.RawMessage `json:"source_credentials"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
+type GDCHServiceAccountFile struct {
+ Type string `json:"type"`
+ FormatVersion string `json:"format_version"`
+ Project string `json:"project"`
+ Name string `json:"name"`
+ CertPath string `json:"ca_cert_path"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ TokenURL string `json:"token_uri"`
+ UniverseDomain string `json:"universe_domain"`
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
new file mode 100644
index 000000000..a02b9f5df
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
@@ -0,0 +1,98 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+ "encoding/json"
+)
+
+// ParseServiceAccount parses bytes into a [ServiceAccountFile].
+func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
+ var f *ServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseClientCredentials parses bytes into a
+// [credsfile.ClientCredentialsFile].
+func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
+ var f *ClientCredentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseUserCredentials parses bytes into a [UserCredentialsFile].
+func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
+ var f *UserCredentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseExternalAccount parses bytes into a [ExternalAccountFile].
+func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
+ var f *ExternalAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseExternalAccountAuthorizedUser parses bytes into a
+// [ExternalAccountAuthorizedUserFile].
+func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
+ var f *ExternalAccountAuthorizedUserFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseImpersonatedServiceAccount parses bytes into a
+// [ImpersonatedServiceAccountFile].
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
+ var f *ImpersonatedServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
+func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
+ var f *GDCHServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type fileTypeChecker struct {
+ Type string `json:"type"`
+}
+
+// ParseFileType determines the [CredentialType] based on bytes provided.
+func ParseFileType(b []byte) (CredentialType, error) {
+ var f fileTypeChecker
+ if err := json.Unmarshal(b, &f); err != nil {
+ return 0, err
+ }
+ return parseCredentialType(f.Type), nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
new file mode 100644
index 000000000..6a8eab6eb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -0,0 +1,225 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "context"
+ "crypto"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+const (
+ // TokenTypeBearer is the auth header prefix for bearer tokens.
+ TokenTypeBearer = "Bearer"
+
+ // QuotaProjectEnvVar is the environment variable for setting the quota
+ // project.
+ QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
+
+ // DefaultUniverseDomain is the default value for universe domain.
+ // Universe domain is the default service domain for a given Cloud universe.
+ DefaultUniverseDomain = "googleapis.com"
+)
+
+type clonableTransport interface {
+ Clone() *http.Transport
+}
+
+// DefaultClient returns an [http.Client] with some defaults set. If
+// the current [http.DefaultTransport] is a [clonableTransport], as
+// is the case for an [*http.Transport], the clone will be used.
+// Otherwise the [http.DefaultTransport] is used directly.
+func DefaultClient() *http.Client {
+ if transport, ok := http.DefaultTransport.(clonableTransport); ok {
+ return &http.Client{
+ Transport: transport.Clone(),
+ Timeout: 30 * time.Second,
+ }
+ }
+
+ return &http.Client{
+ Transport: http.DefaultTransport,
+ Timeout: 30 * time.Second,
+ }
+}
+
+// ParseKey converts the binary contents of a private key file
+// to an crypto.Signer. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (crypto.Signer, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ var parsedKey crypto.PrivateKey
+ var err error
+ parsedKey, err = x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
+ }
+ }
+ parsed, ok := parsedKey.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("private key is not a signer")
+ }
+ return parsed, nil
+}
+
+// GetQuotaProject retrieves quota project with precedence being: override,
+// environment variable, creds json file.
+func GetQuotaProject(b []byte, override string) string {
+ if override != "" {
+ return override
+ }
+ if env := os.Getenv(QuotaProjectEnvVar); env != "" {
+ return env
+ }
+ if b == nil {
+ return ""
+ }
+ var v struct {
+ QuotaProject string `json:"quota_project_id"`
+ }
+ if err := json.Unmarshal(b, &v); err != nil {
+ return ""
+ }
+ return v.QuotaProject
+}
+
+// GetProjectID retrieves project with precedence being: override,
+// environment variable, creds json file.
+func GetProjectID(b []byte, override string) string {
+ if override != "" {
+ return override
+ }
+ if env := os.Getenv(projectEnvVar); env != "" {
+ return env
+ }
+ if b == nil {
+ return ""
+ }
+ var v struct {
+ ProjectID string `json:"project_id"` // standard service account key
+ Project string `json:"project"` // gdch key
+ }
+ if err := json.Unmarshal(b, &v); err != nil {
+ return ""
+ }
+ if v.ProjectID != "" {
+ return v.ProjectID
+ }
+ return v.Project
+}
+
+// DoRequest executes the provided req with the client. It reads the response
+// body, closes it, and returns it.
+func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+ body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize))
+ if err != nil {
+ return nil, nil, err
+ }
+ return resp, body, nil
+}
+
+// ReadAll consumes the whole reader and safely reads the content of its body
+// with some overflow protection.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(io.LimitReader(r, maxBodySize))
+}
+
+// StaticCredentialsProperty is a helper for creating static credentials
+// properties.
+func StaticCredentialsProperty(s string) StaticProperty {
+ return StaticProperty(s)
+}
+
+// StaticProperty always returns that value of the underlying string.
+type StaticProperty string
+
+// GetProperty loads the properly value provided the given context.
+func (p StaticProperty) GetProperty(context.Context) (string, error) {
+ return string(p), nil
+}
+
+// ComputeUniverseDomainProvider fetches the credentials universe domain from
+// the google cloud metadata service.
+type ComputeUniverseDomainProvider struct {
+ MetadataClient *metadata.Client
+ universeDomainOnce sync.Once
+ universeDomain string
+ universeDomainErr error
+}
+
+// GetProperty fetches the credentials universe domain from the google cloud
+// metadata service.
+func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
+ c.universeDomainOnce.Do(func() {
+ c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
+ })
+ if c.universeDomainErr != nil {
+ return "", c.universeDomainErr
+ }
+ return c.universeDomain, nil
+}
+
+// httpGetMetadataUniverseDomain is a package var for unit test substitution.
+var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
+ ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
+ defer cancel()
+ return client.GetWithContext(ctx, "universe/universe-domain")
+}
+
+func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
+ universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
+ if err == nil {
+ return universeDomain, nil
+ }
+ if _, ok := err.(metadata.NotDefinedError); ok {
+ // http.StatusNotFound (404)
+ return DefaultUniverseDomain, nil
+ }
+ return "", err
+}
+
+// FormatIAMServiceAccountResource sets a service account name in an IAM resource
+// name.
+func FormatIAMServiceAccountResource(name string) string {
+ return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
new file mode 100644
index 000000000..9bd55f510
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -0,0 +1,171 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jwt
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+const (
+ // HeaderAlgRSA256 is the RS256 [Header.Algorithm].
+ HeaderAlgRSA256 = "RS256"
+ // HeaderAlgES256 is the ES256 [Header.Algorithm].
+ HeaderAlgES256 = "ES256"
+ // HeaderType is the standard [Header.Type].
+ HeaderType = "JWT"
+)
+
+// Header represents a JWT header.
+type Header struct {
+ Algorithm string `json:"alg"`
+ Type string `json:"typ"`
+ KeyID string `json:"kid"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Claims represents the claims set of a JWT.
+type Claims struct {
+ // Iss is the issuer JWT claim.
+ Iss string `json:"iss"`
+ // Scope is the scope JWT claim.
+ Scope string `json:"scope,omitempty"`
+ // Exp is the expiry JWT claim. If unset, default is in one hour from now.
+ Exp int64 `json:"exp"`
+ // Iat is the subject issued at claim. If unset, default is now.
+ Iat int64 `json:"iat"`
+ // Aud is the audience JWT claim. Optional.
+ Aud string `json:"aud"`
+ // Sub is the subject JWT claim. Optional.
+ Sub string `json:"sub,omitempty"`
+ // AdditionalClaims contains any additional non-standard JWT claims. Optional.
+ AdditionalClaims map[string]interface{} `json:"-"`
+}
+
+func (c *Claims) encode() (string, error) {
+ // Compensate for skew
+ now := time.Now().Add(-10 * time.Second)
+ if c.Iat == 0 {
+ c.Iat = now.Unix()
+ }
+ if c.Exp == 0 {
+ c.Exp = now.Add(time.Hour).Unix()
+ }
+ if c.Exp < c.Iat {
+ return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
+ }
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.AdditionalClaims) == 0 {
+ return base64.RawURLEncoding.EncodeToString(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.AdditionalClaims)
+ if err != nil {
+ return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// EncodeJWS encodes the data using the provided key as a JSON web signature.
+func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ claims, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, claims)
+ h := sha256.New()
+ h.Write([]byte(ss))
+ sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// DecodeJWS decodes a claim set from a JWS payload.
+func DecodeJWS(payload string) (*Claims, error) {
+ // decode returned id token to get expiry
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ return nil, errors.New("invalid token received")
+ }
+ decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &Claims{}
+ if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
+ return nil, err
+ }
+ return c, err
+}
+
+// VerifyJWS tests whether the provided JWT token's signature was produced by
+// the private key associated with the provided public key.
+func VerifyJWS(token string, key *rsa.PublicKey) error {
+ parts := strings.Split(token, ".")
+ if len(parts) != 3 {
+ return errors.New("jwt: invalid token received, token must have 3 parts")
+ }
+
+ signedContent := parts[0] + "." + parts[1]
+ signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return err
+ }
+
+ h := sha256.New()
+ h.Write([]byte(signedContent))
+ return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
new file mode 100644
index 000000000..b1f0fcf93
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -0,0 +1,385 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "log"
+ "log/slog"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "github.com/google/s2a-go"
+ "github.com/google/s2a-go/fallback"
+ "google.golang.org/grpc/credentials"
+)
+
+const (
+ mTLSModeAlways = "always"
+ mTLSModeNever = "never"
+ mTLSModeAuto = "auto"
+
+ // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
+ googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
+ googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+ googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT"
+ googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
+
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+
+ mtlsMDSRoot = "/run/google-mds-mtls/root.crt"
+ mtlsMDSKey = "/run/google-mds-mtls/client.key"
+)
+
+// Type represents the type of transport used.
+type Type int
+
+const (
+ // TransportTypeUnknown represents an unknown transport type and is the default option.
+ TransportTypeUnknown Type = iota
+ // TransportTypeMTLSS2A represents the mTLS transport type using S2A.
+ TransportTypeMTLSS2A
+)
+
+// Options is a struct that is duplicated information from the individual
+// transport packages in order to avoid cyclic deps. It correlates 1:1 with
+// fields on httptransport.Options and grpctransport.Options.
+type Options struct {
+ Endpoint string
+ DefaultEndpointTemplate string
+ DefaultMTLSEndpoint string
+ ClientCertProvider cert.Provider
+ Client *http.Client
+ UniverseDomain string
+ EnableDirectPath bool
+ EnableDirectPathXds bool
+ Logger *slog.Logger
+}
+
+// getUniverseDomain returns the default service domain for a given Cloud
+// universe.
+func (o *Options) getUniverseDomain() string {
+ if o.UniverseDomain == "" {
+ return internal.DefaultUniverseDomain
+ }
+ return o.UniverseDomain
+}
+
+// isUniverseDomainGDU returns true if the universe domain is the default Google
+// universe.
+func (o *Options) isUniverseDomainGDU() bool {
+ return o.getUniverseDomain() == internal.DefaultUniverseDomain
+}
+
+// defaultEndpoint returns the DefaultEndpointTemplate merged with the
+// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
+// empty string.
+func (o *Options) defaultEndpoint() string {
+ if o.DefaultEndpointTemplate == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
+// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the
+// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an
+// empty string.
+func (o *Options) defaultMTLSEndpoint() string {
+ if o.DefaultMTLSEndpoint == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
+// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
+// default endpoint.
+func (o *Options) mergedEndpoint() (string, error) {
+ defaultEndpoint := o.defaultEndpoint()
+ u, err := url.Parse(fixScheme(defaultEndpoint))
+ if err != nil {
+ return "", err
+ }
+ return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
+}
+
+func fixScheme(baseURL string) string {
+ if !strings.Contains(baseURL, "://") {
+ baseURL = "https://" + baseURL
+ }
+ return baseURL
+}
+
+// GRPCTransportCredentials embeds interface TransportCredentials with additional data.
+type GRPCTransportCredentials struct {
+ credentials.TransportCredentials
+ Endpoint string
+ TransportType Type
+}
+
+// GetGRPCTransportCredsAndEndpoint returns an instance of
+// [google.golang.org/grpc/credentials.TransportCredentials], and the
+// corresponding endpoint and transport type to use for GRPC client.
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) {
+ config, err := getTransportConfig(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ defaultTransportCreds := credentials.NewTLS(&tls.Config{
+ GetClientCertificate: config.clientCertSource,
+ })
+
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+
+ var fallbackOpts *s2a.FallbackOptions
+ // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
+ fallbackOpts = &s2a.FallbackOptions{
+ FallbackClientHandshakeFunc: fallbackHandshake,
+ }
+ }
+
+ s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
+ })
+ if err != nil {
+ // Use default if we cannot initialize S2A client transport credentials.
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+ return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil
+}
+
+// GetHTTPTransportConfig returns a client certificate source and a function for
+// dialing MTLS with S2A.
+func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
+ config, err := getTransportConfig(opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
+
+ var fallbackOpts *s2a.FallbackOptions
+ // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+ if fallbackURL, err := url.Parse(config.endpoint); err == nil {
+ if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
+ fallbackOpts = &s2a.FallbackOptions{
+ FallbackDialer: &s2a.FallbackDialer{
+ Dialer: fallbackDialer,
+ ServerAddr: fallbackServerAddr,
+ },
+ }
+ }
+ }
+
+ dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
+ })
+ return nil, dialTLSContextFunc, nil
+}
+
+func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) {
+ rootPEM, err := os.ReadFile(mtlsMDSRootFile)
+ if err != nil {
+ return nil, err
+ }
+ caCertPool := x509.NewCertPool()
+ ok := caCertPool.AppendCertsFromPEM(rootPEM)
+ if !ok {
+ return nil, errors.New("failed to load MTLS MDS root certificate")
+ }
+ // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain
+ // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the
+ // tls.X509KeyPair function as both the certificate chain and private key arguments.
+ cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := tls.Config{
+ RootCAs: caCertPool,
+ Certificates: []tls.Certificate{cert},
+ MinVersion: tls.VersionTLS13,
+ }
+ return credentials.NewTLS(&tlsConfig), nil
+}
+
+func getTransportConfig(opts *Options) (*transportConfig, error) {
+ clientCertSource, err := GetClientCertificateProvider(opts)
+ if err != nil {
+ return nil, err
+ }
+ endpoint, err := getEndpoint(opts, clientCertSource)
+ if err != nil {
+ return nil, err
+ }
+ defaultTransportConfig := transportConfig{
+ clientCertSource: clientCertSource,
+ endpoint: endpoint,
+ }
+
+ if !shouldUseS2A(clientCertSource, opts) {
+ return &defaultTransportConfig, nil
+ }
+
+ s2aAddress := GetS2AAddress(opts.Logger)
+ mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
+ if s2aAddress == "" && mtlsS2AAddress == "" {
+ return &defaultTransportConfig, nil
+ }
+ return &transportConfig{
+ clientCertSource: clientCertSource,
+ endpoint: endpoint,
+ s2aAddress: s2aAddress,
+ mtlsS2AAddress: mtlsS2AAddress,
+ s2aMTLSEndpoint: opts.defaultMTLSEndpoint(),
+ }, nil
+}
+
+// GetClientCertificateProvider returns a default client certificate source, if
+// not provided by the user.
+//
+// A nil default source can be returned if the source does not exist. Any exceptions
+// encountered while initializing the default source will be reported as client
+// error (ex. corrupt metadata file).
+func GetClientCertificateProvider(opts *Options) (cert.Provider, error) {
+ if !isClientCertificateEnabled(opts) {
+ return nil, nil
+ } else if opts.ClientCertProvider != nil {
+ return opts.ClientCertProvider, nil
+ }
+ return cert.DefaultProvider()
+
+}
+
+// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var
+func isClientCertificateEnabled(opts *Options) bool {
+ if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
+ // error as false is OK
+ b, _ := strconv.ParseBool(value)
+ return b
+ }
+ return opts.isUniverseDomainGDU()
+}
+
+type transportConfig struct {
+ // The client certificate source.
+ clientCertSource cert.Provider
+ // The corresponding endpoint to use based on client certificate source.
+ endpoint string
+ // The plaintext S2A address if it can be used, otherwise an empty string.
+ s2aAddress string
+ // The MTLS S2A address if it can be used, otherwise an empty string.
+ mtlsS2AAddress string
+ // The MTLS endpoint to use with S2A.
+ s2aMTLSEndpoint string
+}
+
+// getEndpoint returns the endpoint for the service, taking into account the
+// user-provided endpoint override "settings.Endpoint".
+//
+// If no endpoint override is specified, we will either return the default
+// endpoint or the default mTLS endpoint if a client certificate is available.
+//
+// You can override the default endpoint choice (mTLS vs. regular) by setting
+// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
+//
+// If the endpoint override is an address (host:port) rather than full base
+// URL (ex. https://...), then the user-provided address will be merged into
+// the default endpoint. For example, WithEndpoint("myhost:8000") and
+// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return
+// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS
+// endpoint.
+func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
+ if opts.Endpoint == "" {
+ mtlsMode := getMTLSMode()
+ if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
+ return opts.defaultMTLSEndpoint(), nil
+ }
+ return opts.defaultEndpoint(), nil
+ }
+ if strings.Contains(opts.Endpoint, "://") {
+ // User passed in a full URL path, use it verbatim.
+ return opts.Endpoint, nil
+ }
+ if opts.defaultEndpoint() == "" {
+ // If DefaultEndpointTemplate is not configured,
+ // use the user provided endpoint verbatim. This allows a naked
+ // "host[:port]" URL to be used with GRPC Direct Path.
+ return opts.Endpoint, nil
+ }
+
+ // Assume user-provided endpoint is host[:port], merge it with the default endpoint.
+ return opts.mergedEndpoint()
+}
+
+func getMTLSMode() string {
+ mode := os.Getenv(googleAPIUseMTLS)
+ if mode == "" {
+ mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
+ }
+ if mode == "" {
+ return mTLSModeAuto
+ }
+ return strings.ToLower(mode)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
new file mode 100644
index 000000000..5cedc50f1
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
@@ -0,0 +1,65 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "errors"
+ "sync"
+)
+
+// defaultCertData holds all the variables pertaining to
+// the default certificate provider created by [DefaultProvider].
+//
+// A singleton model is used to allow the provider to be reused
+// by the transport layer. As mentioned in [DefaultProvider] (provider nil, nil)
+// may be returned to indicate a default provider could not be found, which
+// will skip extra tls config in the transport layer .
+type defaultCertData struct {
+ once sync.Once
+ provider Provider
+ err error
+}
+
+var (
+ defaultCert defaultCertData
+)
+
+// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
+type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
+var errSourceUnavailable = errors.New("certificate source is unavailable")
+
+// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource.
+// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource.
+//
+// If neither source is available (due to missing configurations), a nil Source and a nil Error are
+// returned to indicate that a default certificate source is unavailable.
+func DefaultProvider() (Provider, error) {
+ defaultCert.once.Do(func() {
+ defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = nil, nil
+ }
+ }
+ }
+ })
+ return defaultCert.provider, defaultCert.err
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
new file mode 100644
index 000000000..6c954ae19
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
@@ -0,0 +1,54 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client"
+)
+
+type ecpSource struct {
+ key *client.Key
+}
+
+// NewEnterpriseCertificateProxyProvider creates a certificate source
+// using the Enterprise Certificate Proxy client, which delegates
+// certifcate related operations to an OS-specific "signer binary"
+// that communicates with the native keystore (ex. keychain on MacOS).
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate issuer and the location of the signer binary.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
+ key, err := client.Cred(configFilePath)
+ if err != nil {
+ // TODO(codyoss): once this is fixed upstream can handle this error a
+ // little better here. But be safe for now and assume unavailable.
+ return nil, errSourceUnavailable
+ }
+
+ return (&ecpSource{
+ key: key,
+ }).getClientCertificate, nil
+}
+
+func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ var cert tls.Certificate
+ cert.PrivateKey = s.key
+ cert.Certificate = s.key.CertificateChain()
+ return &cert, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
new file mode 100644
index 000000000..738cb2161
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -0,0 +1,124 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+const (
+ metadataPath = ".secureConnect"
+ metadataFile = "context_aware_metadata.json"
+)
+
+type secureConnectSource struct {
+ metadata secureConnectMetadata
+
+ // Cache the cert to avoid executing helper command repeatedly.
+ cachedCertMutex sync.Mutex
+ cachedCert *tls.Certificate
+}
+
+type secureConnectMetadata struct {
+ Cmd []string `json:"cert_provider_command"`
+}
+
+// NewSecureConnectProvider creates a certificate source using
+// the Secure Connect Helper and its associated metadata file.
+//
+// The configFilePath points to the location of the context aware metadata file.
+// If configFilePath is empty, use the default context aware metadata location.
+func NewSecureConnectProvider(configFilePath string) (Provider, error) {
+ if configFilePath == "" {
+ user, err := user.Current()
+ if err != nil {
+ // Error locating the default config means Secure Connect is not supported.
+ return nil, errSourceUnavailable
+ }
+ configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
+ }
+
+ file, err := os.ReadFile(configFilePath)
+ if err != nil {
+ // Config file missing means Secure Connect is not supported.
+ // There are non-os.ErrNotExist errors that may be returned.
+ // (e.g. if the home directory is /dev/null, *nix systems will
+ // return ENOTDIR instead of ENOENT)
+ return nil, errSourceUnavailable
+ }
+
+ var metadata secureConnectMetadata
+ if err := json.Unmarshal(file, &metadata); err != nil {
+ return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
+ }
+ if err := validateMetadata(metadata); err != nil {
+ return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
+ }
+ return (&secureConnectSource{
+ metadata: metadata,
+ }).getClientCertificate, nil
+}
+
+func validateMetadata(metadata secureConnectMetadata) error {
+ if len(metadata.Cmd) == 0 {
+ return errors.New("empty cert_provider_command")
+ }
+ return nil
+}
+
+func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ s.cachedCertMutex.Lock()
+ defer s.cachedCertMutex.Unlock()
+ if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
+ return s.cachedCert, nil
+ }
+ // Expand OS environment variables in the cert provider command such as "$HOME".
+ for i := 0; i < len(s.metadata.Cmd); i++ {
+ s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
+ }
+ command := s.metadata.Cmd
+ data, err := exec.Command(command[0], command[1:]...).Output()
+ if err != nil {
+ return nil, err
+ }
+ cert, err := tls.X509KeyPair(data, data)
+ if err != nil {
+ return nil, err
+ }
+ s.cachedCert = &cert
+ return &cert, nil
+}
+
+// isCertificateExpired returns true if the given cert is expired or invalid.
+func isCertificateExpired(cert *tls.Certificate) bool {
+ if len(cert.Certificate) == 0 {
+ return true
+ }
+ parsed, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ return true
+ }
+ return time.Now().After(parsed.NotAfter)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
new file mode 100644
index 000000000..347aaced7
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -0,0 +1,114 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+type certConfigs struct {
+ Workload *workloadSource `json:"workload"`
+}
+
+type workloadSource struct {
+ CertPath string `json:"cert_path"`
+ KeyPath string `json:"key_path"`
+}
+
+type certificateConfig struct {
+ CertConfigs certConfigs `json:"cert_configs"`
+}
+
+// NewWorkloadX509CertProvider creates a certificate source
+// that reads a certificate and private key file from the local file system.
+// This is intended to be used for workload identity federation.
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate and key file paths.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
+ if configFilePath == "" {
+ envFilePath := util.GetConfigFilePathFromEnv()
+ if envFilePath != "" {
+ configFilePath = envFilePath
+ } else {
+ configFilePath = util.GetDefaultConfigFilePath()
+ }
+ }
+
+ certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ source := &workloadSource{
+ CertPath: certFile,
+ KeyPath: keyFile,
+ }
+ return source.getClientCertificate, nil
+}
+
+// getClientCertificate attempts to load the certificate and key from the files specified in the
+// certificate config.
+func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath)
+ if err != nil {
+ return nil, err
+ }
+ return &cert, nil
+}
+
+// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private
+// key file paths.
+func getCertAndKeyFiles(configFilePath string) (string, string, error) {
+ jsonFile, err := os.Open(configFilePath)
+ if err != nil {
+ return "", "", errSourceUnavailable
+ }
+
+ byteValue, err := io.ReadAll(jsonFile)
+ if err != nil {
+ return "", "", err
+ }
+
+ var config certificateConfig
+ if err := json.Unmarshal(byteValue, &config); err != nil {
+ return "", "", err
+ }
+
+ if config.CertConfigs.Workload == nil {
+ return "", "", errSourceUnavailable
+ }
+
+ certFile := config.CertConfigs.Workload.CertPath
+ keyFile := config.CertConfigs.Workload.KeyPath
+
+ if certFile == "" {
+ return "", "", errors.New("certificate configuration is missing the certificate file location")
+ }
+
+ if keyFile == "" {
+ return "", "", errors.New("certificate configuration is missing the key file location")
+ }
+
+ return certFile, keyFile, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
new file mode 100644
index 000000000..a63309956
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -0,0 +1,138 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "log/slog"
+ "os"
+ "strconv"
+ "sync"
+
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "cloud.google.com/go/compute/metadata"
+)
+
+const (
+ configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
+)
+
+var (
+ mtlsConfiguration *mtlsConfig
+
+ mtlsOnce sync.Once
+)
+
+// GetS2AAddress returns the S2A address to be reached via plaintext connection.
+// Returns empty string if not set or invalid.
+func GetS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.PlaintextAddress
+}
+
+// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
+// Returns empty string if not set or invalid.
+func GetMTLSS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.MTLSAddress
+}
+
+// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
+type mtlsConfig struct {
+ S2A *s2aAddresses `json:"s2a"`
+}
+
+func (c *mtlsConfig) valid() bool {
+ return c != nil && c.S2A != nil
+}
+
+// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
+type s2aAddresses struct {
+ // PlaintextAddress is the plaintext address to reach S2A
+ PlaintextAddress string `json:"plaintext_address"`
+ // MTLSAddress is the MTLS address to reach S2A
+ MTLSAddress string `json:"mtls_address"`
+}
+
+func getMetadataMTLSAutoConfig(logger *slog.Logger) {
+ var err error
+ mtlsOnce.Do(func() {
+ mtlsConfiguration, err = queryConfig(logger)
+ if err != nil {
+ log.Printf("Getting MTLS config failed: %v", err)
+ }
+ })
+}
+
+var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: logger,
+ })
+ return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
+}
+
+func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
+ resp, err := httpGetMetadataMTLSConfig(logger)
+ if err != nil {
+ return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
+ }
+ var config mtlsConfig
+ err = json.Unmarshal([]byte(resp), &config)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err)
+ }
+ if config.S2A == nil {
+ return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config)
+ }
+ return &config, nil
+}
+
+func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
+ // If client cert is found, use that over S2A.
+ if clientCertSource != nil {
+ return false
+ }
+ // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
+ if !isGoogleS2AEnabled() {
+ return false
+ }
+ // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
+ if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
+ return false
+ }
+ // If custom HTTP client is provided, skip S2A.
+ if opts.Client != nil {
+ return false
+ }
+ // If directPath is enabled, skip S2A.
+ return !opts.EnableDirectPath && !opts.EnableDirectPathXds
+}
+
+func isGoogleS2AEnabled() bool {
+ b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
+ if err != nil {
+ return false
+ }
+ return b
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
new file mode 100644
index 000000000..5c8721efa
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provided internal helpers for the two transport packages
+// (grpctransport and httptransport).
+package transport
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth/credentials"
+)
+
+// CloneDetectOptions clones a user set detect option into some new memory that
+// we can internally manipulate before sending onto the detect package.
+func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
+ if oldDo == nil {
+ // it is valid for users not to set this, but we will need to to default
+ // some options for them in this case so return some initialized memory
+ // to work with.
+ return &credentials.DetectOptions{}
+ }
+ newDo := &credentials.DetectOptions{
+ // Simple types
+ TokenBindingType: oldDo.TokenBindingType,
+ Audience: oldDo.Audience,
+ Subject: oldDo.Subject,
+ EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
+ TokenURL: oldDo.TokenURL,
+ STSAudience: oldDo.STSAudience,
+ CredentialsFile: oldDo.CredentialsFile,
+ UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
+ UniverseDomain: oldDo.UniverseDomain,
+
+ // These fields are pointer types that we just want to use exactly as
+ // the user set, copy the ref
+ Client: oldDo.Client,
+ Logger: oldDo.Logger,
+ AuthHandlerOptions: oldDo.AuthHandlerOptions,
+ }
+
+ // Smartly size this memory and copy below.
+ if len(oldDo.CredentialsJSON) > 0 {
+ newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
+ copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
+ }
+ if len(oldDo.Scopes) > 0 {
+ newDo.Scopes = make([]string, len(oldDo.Scopes))
+ copy(newDo.Scopes, oldDo.Scopes)
+ }
+
+ return newDo
+}
+
+// ValidateUniverseDomain verifies that the universe domain configured for the
+// client matches the universe domain configured for the credentials.
+func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
+ if clientUniverseDomain != credentialsUniverseDomain {
+ return fmt.Errorf(
+ "the configured universe domain (%q) does not match the universe "+
+ "domain found in the credentials (%q). If you haven't configured "+
+ "the universe domain explicitly, \"googleapis.com\" is the default",
+ clientUniverseDomain,
+ credentialsUniverseDomain)
+ }
+ return nil
+}
+
+// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
+func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
+ trans := BaseTransport()
+ trans.TLSClientConfig = tlsConfig
+ return &http.Client{Transport: trans}
+}
+
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
+func BaseTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
new file mode 100644
index 000000000..42716752e
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
@@ -0,0 +1,82 @@
+# Changelog
+
+## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161)
+
+## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52))
+
+## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
+## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
+
+## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)
+
+
+### Features
+
+* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## 0.1.0 (2023-10-19)
+
+
+### Features
+
+* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f))
+* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
new file mode 100644
index 000000000..9cc33e5ee
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
@@ -0,0 +1,200 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth]
+// and [golang.org/x/oauth2].
+package oauth2adapt
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+
+ "cloud.google.com/go/auth"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+)
+
+const (
+ oauth2TokenSourceKey = "oauth2.google.tokenSource"
+ oauth2ServiceAccountKey = "oauth2.google.serviceAccount"
+ authTokenSourceKey = "auth.google.tokenSource"
+ authServiceAccountKey = "auth.google.serviceAccount"
+)
+
+// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
+// into a [cloud.google.com/go/auth.TokenProvider].
+func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
+ return &tokenProviderAdapter{ts: ts}
+}
+
+type tokenProviderAdapter struct {
+ ts oauth2.TokenSource
+}
+
+// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
+// is a light wrapper around the underlying TokenSource.
+func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
+ tok, err := tp.ts.Token()
+ if err != nil {
+ var err2 *oauth2.RetrieveError
+ if ok := errors.As(err, &err2); ok {
+ return nil, AuthErrorFromRetrieveError(err2)
+ }
+ return nil, err
+ }
+ // Preserve compute token metadata, for both types of tokens.
+ metadata := map[string]interface{}{}
+ if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok {
+ metadata[authTokenSourceKey] = val
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok {
+ metadata[authServiceAccountKey] = val
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ return &auth.Token{
+ Value: tok.AccessToken,
+ Type: tok.Type(),
+ Expiry: tok.Expiry,
+ Metadata: metadata,
+ }, nil
+}
+
+// TokenSourceFromTokenProvider converts any
+// [cloud.google.com/go/auth.TokenProvider] into a
+// [golang.org/x/oauth2.TokenSource].
+func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
+ return &tokenSourceAdapter{tp: tp}
+}
+
+type tokenSourceAdapter struct {
+ tp auth.TokenProvider
+}
+
+// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
+// is a light wrapper around the underlying TokenProvider.
+func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
+ tok, err := ts.tp.Token(context.Background())
+ if err != nil {
+ var err2 *auth.Error
+ if ok := errors.As(err, &err2); ok {
+ return nil, AddRetrieveErrorToAuthError(err2)
+ }
+ return nil, err
+ }
+ tok2 := &oauth2.Token{
+ AccessToken: tok.Value,
+ TokenType: tok.Type,
+ Expiry: tok.Expiry,
+ }
+ // Preserve token metadata.
+ m := tok.Metadata
+ if m != nil {
+ // Copy map to avoid concurrent map writes error (#11161).
+ metadata := make(map[string]interface{}, len(m)+2)
+ for k, v := range m {
+ metadata[k] = v
+ }
+ // Append compute token metadata in converted form.
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" {
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" {
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ tok2 = tok2.WithExtra(metadata)
+ }
+ return tok2, nil
+}
+
+// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
+// to a [cloud.google.com/go/auth.Credentials].
+func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials {
+ if creds == nil {
+ return nil
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: TokenProviderFromTokenSource(creds.TokenSource),
+ JSON: creds.JSON,
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return creds.ProjectID, nil
+ }),
+ UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return creds.GetUniverseDomain()
+ }),
+ })
+}
+
+// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials]
+// to a [golang.org/x/oauth2/google.Credentials].
+func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials {
+ if creds == nil {
+ return nil
+ }
+ // Throw away errors as old credentials are not request aware. Also, no
+ // network requests are currently happening for this use case.
+ projectID, _ := creds.ProjectID(context.Background())
+
+ return &google.Credentials{
+ TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider),
+ ProjectID: projectID,
+ JSON: creds.JSON(),
+ UniverseDomainProvider: func() (string, error) {
+ return creds.UniverseDomain(context.Background())
+ },
+ }
+}
+
+type oauth2Error struct {
+ ErrorCode string `json:"error"`
+ ErrorDescription string `json:"error_description"`
+ ErrorURI string `json:"error_uri"`
+}
+
+// AddRetrieveErrorToAuthError returns the same error provided and adds a
+// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the
+// [cloud.google.com/go/auth.Error].
+func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error {
+ if err == nil {
+ return nil
+ }
+ e := &oauth2.RetrieveError{
+ Response: err.Response,
+ Body: err.Body,
+ }
+ err.Err = e
+ if len(err.Body) > 0 {
+ var oErr oauth2Error
+ // ignore the error as it only fills in extra details
+ json.Unmarshal(err.Body, &oErr)
+ e.ErrorCode = oErr.ErrorCode
+ e.ErrorDescription = oErr.ErrorDescription
+ e.ErrorURI = oErr.ErrorURI
+ }
+ return err
+}
+
+// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that
+// wraps the provided [golang.org/x/oauth2.RetrieveError].
+func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error {
+ if err == nil {
+ return nil
+ }
+ return &auth.Error{
+ Response: err.Response,
+ Body: err.Body,
+ Err: err,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
new file mode 100644
index 000000000..07804dc16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/threelegged.go
@@ -0,0 +1,382 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
+// OAuth consent at the specified auth code URL and returns an auth code and
+// state upon approval.
+type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
+
+// Options3LO are the options for doing a 3-legged OAuth2 flow.
+type Options3LO struct {
+ // ClientID is the application's ID.
+ ClientID string
+ // ClientSecret is the application's secret. Not required if AuthHandlerOpts
+ // is set.
+ ClientSecret string
+ // AuthURL is the URL for authenticating.
+ AuthURL string
+ // TokenURL is the URL for retrieving a token.
+ TokenURL string
+ // AuthStyle is used to describe how to client info in the token request.
+ AuthStyle Style
+ // RefreshToken is the token used to refresh the credential. Not required
+ // if AuthHandlerOpts is set.
+ RefreshToken string
+ // RedirectURL is the URL to redirect users to. Optional.
+ RedirectURL string
+ // Scopes specifies requested permissions for the Token. Optional.
+ Scopes []string
+
+ // URLParams are the set of values to apply to the token exchange. Optional.
+ URLParams url.Values
+ // Client is the client to be used to make the underlying token requests.
+ // Optional.
+ Client *http.Client
+ // EarlyTokenExpiry is the time before the token expires that it should be
+ // refreshed. If not set the default value is 3 minutes and 45 seconds.
+ // Optional.
+ EarlyTokenExpiry time.Duration
+
+ // AuthHandlerOpts provides a set of options for doing a
+ // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
+ AuthHandlerOpts *AuthorizationHandlerOptions
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *Options3LO) validate() error {
+ if o == nil {
+ return errors.New("auth: options must be provided")
+ }
+ if o.ClientID == "" {
+ return errors.New("auth: client ID must be provided")
+ }
+ if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
+ return errors.New("auth: client secret must be provided")
+ }
+ if o.AuthURL == "" {
+ return errors.New("auth: auth URL must be provided")
+ }
+ if o.TokenURL == "" {
+ return errors.New("auth: token URL must be provided")
+ }
+ if o.AuthStyle == StyleUnknown {
+ return errors.New("auth: auth style must be provided")
+ }
+ if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
+ return errors.New("auth: refresh token must be provided")
+ }
+ return nil
+}
+
+func (o *Options3LO) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+// PKCEOptions holds parameters to support PKCE.
+type PKCEOptions struct {
+ // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
+ Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier.
+ // ChallengeMethod is the encryption method (ex. S256).
+ ChallengeMethod string
+ // Verifier is the original, non-encrypted secret.
+ Verifier string // The original, non-encrypted secret.
+}
+
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn int `json:"expires_in"`
+ // error fields
+ ErrorCode string `json:"error"`
+ ErrorDescription string `json:"error_description"`
+ ErrorURI string `json:"error_uri"`
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+func (o *Options3LO) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+// authCodeURL returns a URL that points to a OAuth2 consent page.
+func (o *Options3LO) authCodeURL(state string, values url.Values) string {
+ var buf bytes.Buffer
+ buf.WriteString(o.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {o.ClientID},
+ }
+ if o.RedirectURL != "" {
+ v.Set("redirect_uri", o.RedirectURL)
+ }
+ if len(o.Scopes) > 0 {
+ v.Set("scope", strings.Join(o.Scopes, " "))
+ }
+ if state != "" {
+ v.Set("state", state)
+ }
+ if o.AuthHandlerOpts != nil {
+ if o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
+ v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
+ }
+ if o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
+ v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
+ }
+ }
+ for k := range values {
+ v.Set(k, v.Get(k))
+ }
+ if strings.Contains(o.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
+// configuration. The TokenProvider is caches and auto-refreshes tokens by
+// default.
+func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ if opts.AuthHandlerOpts != nil {
+ return new3LOTokenProviderWithAuthHandler(opts), nil
+ }
+ return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenExpiry,
+ }), nil
+}
+
+// AuthorizationHandlerOptions provides a set of options to specify for doing a
+// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
+type AuthorizationHandlerOptions struct {
+ // AuthorizationHandler specifies the handler used to for the authorization
+ // part of the flow.
+ Handler AuthorizationHandler
+ // State is used verify that the "state" is identical in the request and
+ // response before exchanging the auth code for OAuth2 token.
+ State string
+ // PKCEOpts allows setting configurations for PKCE. Optional.
+ PKCEOpts *PKCEOptions
+}
+
+func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
+ return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenExpiry,
+ })
+}
+
+// exchange handles the final exchange portion of the 3lo flow. Returns a Token,
+// refreshToken, and error.
+func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) {
+ // Build request
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if o.RedirectURL != "" {
+ v.Set("redirect_uri", o.RedirectURL)
+ }
+ if o.AuthHandlerOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.Verifier != "" {
+ v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier)
+ }
+ for k := range o.URLParams {
+ v.Set(k, o.URLParams.Get(k))
+ }
+ return fetchToken(ctx, o, v)
+}
+
+// This struct is not safe for concurrent access alone, but the way it is used
+// in this package by wrapping it with a cachedTokenProvider makes it so.
+type tokenProvider3LO struct {
+ opts *Options3LO
+ client *http.Client
+ refreshToken string
+}
+
+func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) {
+ if tp.refreshToken == "" {
+ return nil, errors.New("auth: token expired and refresh token is not set")
+ }
+ v := url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tp.refreshToken},
+ }
+ for k := range tp.opts.URLParams {
+ v.Set(k, tp.opts.URLParams.Get(k))
+ }
+
+ tk, rt, err := fetchToken(ctx, tp.opts, v)
+ if err != nil {
+ return nil, err
+ }
+ if tp.refreshToken != rt && rt != "" {
+ tp.refreshToken = rt
+ }
+ return tk, err
+}
+
+type tokenProviderWithHandler struct {
+ opts *Options3LO
+ state string
+}
+
+func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) {
+ url := tp.opts.authCodeURL(tp.state, nil)
+ code, state, err := tp.opts.AuthHandlerOpts.Handler(url)
+ if err != nil {
+ return nil, err
+ }
+ if state != tp.state {
+ return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow")
+ }
+ tok, _, err := tp.opts.exchange(ctx, code)
+ return tok, err
+}
+
+// fetchToken returns a Token, refresh token, and/or an error.
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) {
+ var refreshToken string
+ if o.AuthStyle == StyleInParams {
+ if o.ClientID != "" {
+ v.Set("client_id", o.ClientID)
+ }
+ if o.ClientSecret != "" {
+ v.Set("client_secret", o.ClientSecret)
+ }
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, refreshToken, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if o.AuthStyle == StyleInHeader {
+ req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
+ }
+ logger := o.logger()
+
+ logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ // Make request
+ resp, body, err := internal.DoRequest(o.client(), req)
+ if err != nil {
+ return nil, refreshToken, err
+ }
+ logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
+ failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
+ tokError := &Error{
+ Response: resp,
+ Body: body,
+ }
+
+ var token *Token
+ // errors ignored because of default switch on content
+ content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ // some endpoints return a query string
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ if failureStatus {
+ return nil, refreshToken, tokError
+ }
+ return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err)
+ }
+ tokError.code = vals.Get("error")
+ tokError.description = vals.Get("error_description")
+ tokError.uri = vals.Get("error_uri")
+ token = &Token{
+ Value: vals.Get("access_token"),
+ Type: vals.Get("token_type"),
+ Metadata: make(map[string]interface{}, len(vals)),
+ }
+ for k, v := range vals {
+ token.Metadata[k] = v
+ }
+ refreshToken = vals.Get("refresh_token")
+ e := vals.Get("expires_in")
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ if failureStatus {
+ return nil, refreshToken, tokError
+ }
+ return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err)
+ }
+ tokError.code = tj.ErrorCode
+ tokError.description = tj.ErrorDescription
+ tokError.uri = tj.ErrorURI
+ token = &Token{
+ Value: tj.AccessToken,
+ Type: tj.TokenType,
+ Expiry: tj.expiry(),
+ Metadata: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Metadata) // optional field, skip err check
+ refreshToken = tj.RefreshToken
+ }
+ // according to spec, servers should respond status 400 in error case
+ // https://www.rfc-editor.org/rfc/rfc6749#section-5.2
+ // but some unorthodox servers respond 200 in error case
+ if failureStatus || tokError.code != "" {
+ return nil, refreshToken, tokError
+ }
+ if token.Value == "" {
+ return nil, refreshToken, errors.New("auth: server response missing access_token")
+ }
+ return token, refreshToken, nil
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 967e06074..1f848ce0b 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,52 @@
# Changes
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.6.0...compute/metadata/v0.7.0) (2025-05-13)
+
+
+### Features
+
+* **compute/metadata:** Allow canceling GCE detection ([#11786](https://github.com/googleapis/google-cloud-go/issues/11786)) ([78100fe](https://github.com/googleapis/google-cloud-go/commit/78100fe7e28cd30f1e10b47191ac3c9839663b64))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
+
+
+### Features
+
+* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
+
+
+### Features
+
+* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01)
+
+
+### Features
+
+* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3))
+
+
+### Documentation
+
+* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f))
+
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go
new file mode 100644
index 000000000..8ec673b88
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/log.go
@@ -0,0 +1,149 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+)
+
+// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog
+// to avoid the dependency. The compute/metadata module is used by too many
+// non-client library modules that can't justify the dependency.
+
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+ return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+ return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+ return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+ return h
+}
+
+// httpRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func httpRequest(req *http.Request, body []byte) slog.LogValuer {
+ return &request{
+ req: req,
+ payload: body,
+ }
+}
+
+type request struct {
+ req *http.Request
+ payload []byte
+}
+
+func (r *request) LogValue() slog.Value {
+ if r == nil || r.req == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method))
+ groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String()))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.req.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+// httpResponse returns a lazily evaluated [slog.LogValuer] for a
+// [http.Response] and the associated body.
+func httpResponse(resp *http.Response, body []byte) slog.LogValuer {
+ return &response{
+ resp: resp,
+ payload: body,
+ }
+}
+
+type response struct {
+ resp *http.Response
+ payload []byte
+}
+
+func (r *response) LogValue() slog.Value {
+ if r == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.resp.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+ peekChar := payload[0]
+ if peekChar == '{' {
+ // JSON object
+ var m map[string]any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else if peekChar == '[' {
+ // JSON array
+ var m []any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else {
+ // Everything else
+ buf := &bytes.Buffer{}
+ if err := json.Compact(buf, payload); err != nil {
+ // Write raw payload incase of error
+ buf.Write(payload)
+ }
+ return slog.String("payload", buf.String()), true
+ }
+ return slog.Attr{}, false
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index f67e3c7ee..322be8032 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -24,11 +24,11 @@ import (
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net"
"net/http"
"net/url"
"os"
- "runtime"
"strings"
"sync"
"time"
@@ -61,7 +61,10 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: newDefaultHTTPClient()}
+var defaultClient = &Client{
+ hc: newDefaultHTTPClient(),
+ logger: slog.New(noOpHandler{}),
+}
func newDefaultHTTPClient() *http.Client {
return &http.Client{
@@ -88,16 +91,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
-func (c *cachedValue) get(cl *Client) (v string, err error) {
+func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
- v, err = cl.getTrimmed(context.Background(), c.k)
+ v, err = cl.getTrimmed(ctx, c.k)
} else {
- v, err = cl.GetWithContext(context.Background(), c.k)
+ v, err = cl.GetWithContext(ctx, c.k)
}
if err == nil {
c.v = v
@@ -110,99 +113,27 @@ var (
onGCE bool
)
-// OnGCE reports whether this process is running on Google Compute Engine.
+// OnGCE reports whether this process is running on Google Compute Platforms.
+// NOTE: True returned from `OnGCE` does not guarantee that the metadata server
+// is accessible from this process and have all the metadata defined.
func OnGCE() bool {
- onGCEOnce.Do(initOnGCE)
- return onGCE
-}
-
-func initOnGCE() {
- onGCE = testOnGCE()
+ return OnGCEWithContext(context.Background())
}
-func testOnGCE() bool {
- // The user explicitly said they're on GCE, so trust them.
- if os.Getenv(metadataHostEnv) != "" {
- return true
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- resc := make(chan bool, 2)
-
- // Try two strategies in parallel.
- // See https://github.com/googleapis/google-cloud-go/issues/194
- go func() {
- req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
- req.Header.Set("User-Agent", userAgent)
- res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
- if err != nil {
- resc <- false
- return
- }
- defer res.Body.Close()
- resc <- res.Header.Get("Metadata-Flavor") == "Google"
- }()
-
- go func() {
- resolver := &net.Resolver{}
- addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
- if err != nil || len(addrs) == 0 {
- resc <- false
- return
- }
- resc <- strsContains(addrs, metadataIP)
- }()
-
- tryHarder := systemInfoSuggestsGCE()
- if tryHarder {
- res := <-resc
- if res {
- // The first strategy succeeded, so let's use it.
- return true
- }
- // Wait for either the DNS or metadata server probe to
- // contradict the other one and say we are running on
- // GCE. Give it a lot of time to do so, since the system
- // info already suggests we're running on a GCE BIOS.
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
- select {
- case res = <-resc:
- return res
- case <-timer.C:
- // Too slow. Who knows what this system is.
- return false
- }
- }
-
- // There's no hint from the system info that we're running on
- // GCE, so use the first probe's result as truth, whether it's
- // true or false. The goal here is to optimize for speed for
- // users who are NOT running on GCE. We can't assume that
- // either a DNS lookup or an HTTP request to a blackholed IP
- // address is fast. Worst case this should return when the
- // metaClient's Transport.ResponseHeaderTimeout or
- // Transport.Dial.Timeout fires (in two seconds).
- return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
- if runtime.GOOS != "linux" {
- // We don't have any non-Linux clues available, at least yet.
- return false
- }
- slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
- name := strings.TrimSpace(string(slurp))
- return name == "Google" || name == "Google Compute Engine"
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// This function's return value is memoized for better performance.
+// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server
+// is accessible from this process and have all the metadata defined.
+func OnGCEWithContext(ctx context.Context) bool {
+ onGCEOnce.Do(func() {
+ onGCE = defaultClient.OnGCEWithContext(ctx)
+ })
+ return onGCE
}
// Subscribe calls Client.SubscribeWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [SubscribeWithContext].
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
}
@@ -225,55 +156,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) {
}
// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return defaultClient.ProjectID() }
+//
+// Deprecated: Please use the context aware variant [ProjectIDWithContext].
+func ProjectID() (string, error) {
+ return defaultClient.ProjectIDWithContext(context.Background())
+}
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func ProjectIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ProjectIDWithContext(ctx)
+}
// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
+//
+// Deprecated: Please use the context aware variant [NumericProjectIDWithContext].
+func NumericProjectID() (string, error) {
+ return defaultClient.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func NumericProjectIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.NumericProjectIDWithContext(ctx)
+}
// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) { return defaultClient.InternalIP() }
+//
+// Deprecated: Please use the context aware variant [InternalIPWithContext].
+func InternalIP() (string, error) {
+ return defaultClient.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func InternalIPWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InternalIPWithContext(ctx)
+}
// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
+//
+// Deprecated: Please use the context aware variant [ExternalIPWithContext].
+func ExternalIP() (string, error) {
+ return defaultClient.ExternalIPWithContext(context.Background())
+}
+
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func ExternalIPWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ExternalIPWithContext(ctx)
+}
+
+// Email calls Client.EmailWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [EmailWithContext].
+func Email(serviceAccount string) (string, error) {
+ return defaultClient.EmailWithContext(context.Background(), serviceAccount)
+}
-// Email calls Client.Email on the default client.
-func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
+// EmailWithContext calls Client.EmailWithContext on the default client.
+func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
+ return defaultClient.EmailWithContext(ctx, serviceAccount)
+}
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
+//
+// Deprecated: Please use the context aware variant [HostnameWithContext].
+func Hostname() (string, error) {
+ return defaultClient.HostnameWithContext(context.Background())
+}
+
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// ".c..internal".
+func HostnameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.HostnameWithContext(ctx)
+}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+//
+// Deprecated: Please use the context aware variant [InstanceTagsWithContext].
+func InstanceTags() ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTagsWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(ctx)
+}
// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
+//
+// Deprecated: Please use the context aware variant [InstanceIDWithContext].
+func InstanceID() (string, error) {
+ return defaultClient.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func InstanceIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceIDWithContext(ctx)
+}
// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
+//
+// Deprecated: Please use the context aware variant [InstanceNameWithContext].
+func InstanceName() (string, error) {
+ return defaultClient.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func InstanceNameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceNameWithContext(ctx)
+}
// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
+//
+// Deprecated: Please use the context aware variant [ZoneWithContext].
+func Zone() (string, error) {
+ return defaultClient.ZoneWithContext(context.Background())
+}
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func ZoneWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ZoneWithContext(ctx)
+}
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
+// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributesWithContext.
+func InstanceAttributes() ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(context.Background())
+}
-// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
+// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
+func InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(ctx)
+}
+
+// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ProjectAttributesWithContext].
+func ProjectAttributes() ([]string, error) {
+ return defaultClient.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
+func ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.ProjectAttributesWithContext(ctx)
+}
+
+// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext].
func InstanceAttributeValue(attr string) (string, error) {
- return defaultClient.InstanceAttributeValue(attr)
+ return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr)
}
-// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
+// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client.
+func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return defaultClient.InstanceAttributeValueWithContext(ctx, attr)
+}
+
+// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext].
func ProjectAttributeValue(attr string) (string, error) {
- return defaultClient.ProjectAttributeValue(attr)
+ return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr)
+}
+
+// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client.
+func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return defaultClient.ProjectAttributeValueWithContext(ctx, attr)
}
-// Scopes calls Client.Scopes on the default client.
-func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
+// Scopes calls Client.ScopesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ScopesWithContext].
+func Scopes(serviceAccount string) ([]string, error) {
+ return defaultClient.ScopesWithContext(context.Background(), serviceAccount)
+}
+
+// ScopesWithContext calls Client.ScopesWithContext on the default client.
+func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
+ return defaultClient.ScopesWithContext(ctx, serviceAccount)
+}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
@@ -286,18 +350,120 @@ func strsContains(ss []string, s string) bool {
// A Client provides metadata.
type Client struct {
- hc *http.Client
+ hc *http.Client
+ logger *slog.Logger
+}
+
+// Options for configuring a [Client].
+type Options struct {
+ // Client is the HTTP client used to make requests. Optional.
+ Client *http.Client
+ // Logger is used to log information about HTTP request and responses.
+ // If not provided, nothing will be logged. Optional.
+ Logger *slog.Logger
}
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
- if c == nil {
+ return NewWithOptions(&Options{
+ Client: c,
+ })
+}
+
+// NewWithOptions returns a Client that is configured with the provided Options.
+func NewWithOptions(opts *Options) *Client {
+ if opts == nil {
return defaultClient
}
+ client := opts.Client
+ if client == nil {
+ client = newDefaultHTTPClient()
+ }
+ logger := opts.Logger
+ if logger == nil {
+ logger = slog.New(noOpHandler{})
+ }
+ return &Client{hc: client, logger: logger}
+}
- return &Client{hc: c}
+// NOTE: metadataRequestStrategy is assigned to a variable for test stubbing purposes.
+var metadataRequestStrategy = func(ctx context.Context, httpClient *http.Client, resc chan bool) {
+ req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+ req.Header.Set("User-Agent", userAgent)
+ res, err := httpClient.Do(req.WithContext(ctx))
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+}
+
+// NOTE: dnsRequestStrategy is assigned to a variable for test stubbing purposes.
+var dnsRequestStrategy = func(ctx context.Context, resc chan bool) {
+ resolver := &net.Resolver{}
+ addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+}
+
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server
+// is accessible from this process and have all the metadata defined.
+func (c *Client) OnGCEWithContext(ctx context.Context) bool {
+ // The user explicitly said they're on GCE, so trust them.
+ if os.Getenv(metadataHostEnv) != "" {
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/googleapis/google-cloud-go/issues/194
+ go metadataRequestStrategy(ctx, c.hc, resc)
+ go dnsRequestStrategy(ctx, resc)
+
+ tryHarder := systemInfoSuggestsGCE()
+ if tryHarder {
+ res := <-resc
+ if res {
+ // The first strategy succeeded, so let's use it.
+ return true
+ }
+
+ // Wait for either the DNS or metadata server probe to
+ // contradict the other one and say we are running on
+ // GCE. Give it a lot of time to do so, since the system
+ // info already suggests we're running on a GCE BIOS.
+ // Ensure cancellations from the calling context are respected.
+ waitContext, cancelWait := context.WithTimeout(ctx, 5*time.Second)
+ defer cancelWait()
+ select {
+ case res = <-resc:
+ return res
+ case <-waitContext.Done():
+ // Too slow. Who knows what this system is.
+ return false
+ }
+ }
+
+ // There's no hint from the system info that we're running on
+ // GCE, so use the first probe's result as truth, whether it's
+ // true or false. The goal here is to optimize for speed for
+ // users who are NOT running on GCE. We can't assume that
+ // either a DNS lookup or an HTTP request to a blackholed IP
+ // address is fast. Worst case this should return when the
+ // metaClient's Transport.ResponseHeaderTimeout or
+ // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc
}
// getETag returns a value from the metadata service as well as the associated ETag.
@@ -327,14 +493,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
req.Header.Set("User-Agent", userAgent)
var res *http.Response
var reqErr error
+ var body []byte
retryer := newRetryer()
for {
+ c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
res, reqErr = c.hc.Do(req)
var code int
if res != nil {
code = res.StatusCode
+ body, err = io.ReadAll(res.Body)
+ if err != nil {
+ res.Body.Close()
+ return "", "", err
+ }
+ c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
+ res.Body.Close()
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
if err := sleep(ctx, delay); err != nil {
return "", "", err
}
@@ -345,18 +523,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if reqErr != nil {
return "", "", reqErr
}
- defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := io.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
if res.StatusCode != 200 {
- return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ return "", "", &Error{Code: res.StatusCode, Message: string(body)}
}
- return string(all), res.Header.Get("Etag"), nil
+ return string(body), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
@@ -381,6 +554,10 @@ func (c *Client) Get(suffix string) (string, error) {
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
+//
+// NOTE: Without an extra deadline in the context this call can take in the
+// worst case, with internal backoff retries, up to 15 seconds (e.g. when server
+// is responding slowly). Pass context with additional timeouts when needed.
func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
val, _, err := c.getETag(ctx, suffix)
return val, err
@@ -392,8 +569,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e
return
}
-func (c *Client) lines(suffix string) ([]string, error) {
- j, err := c.GetWithContext(context.Background(), suffix)
+func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) {
+ j, err := c.GetWithContext(ctx, suffix)
if err != nil {
return nil, err
}
@@ -405,45 +582,104 @@ func (c *Client) lines(suffix string) ([]string, error) {
}
// ProjectID returns the current instance's project ID string.
-func (c *Client) ProjectID() (string, error) { return projID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext].
+func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) }
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) }
// NumericProjectID returns the current instance's numeric project ID.
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext].
+func (c *Client) NumericProjectID() (string, error) {
+ return c.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) {
+ return projNum.get(ctx, c)
+}
// InstanceID returns the current VM's numeric instance ID.
-func (c *Client) InstanceID() (string, error) { return instID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext].
+func (c *Client) InstanceID() (string, error) {
+ return c.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) {
+ return instID.get(ctx, c)
+}
// InternalIP returns the instance's primary internal IP address.
+//
+// Deprecated: Please use the context aware variant [Client.InternalIPWithContext].
func (c *Client) InternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
+ return c.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
+//
+// Deprecated: Please use the context aware variant [Client.EmailWithContext].
func (c *Client) Email(serviceAccount string) (string, error) {
+ return c.EmailWithContext(context.Background(), serviceAccount)
+}
+
+// EmailWithContext returns the email address associated with the service account.
+// The serviceAccount parameter default value (empty string or "default" value)
+// will use the instance's main account.
+func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
+ return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
+//
+// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext].
func (c *Client) ExternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
+ return c.ExternalIPWithContext(context.Background())
+}
+
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
+//
+// Deprecated: Please use the context aware variant [Client.HostnameWithContext].
func (c *Client) Hostname() (string, error) {
- return c.getTrimmed(context.Background(), "instance/hostname")
+ return c.HostnameWithContext(context.Background())
}
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// ".c..internal".
+func (c *Client) HostnameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext].
func (c *Client) InstanceTags() ([]string, error) {
+ return c.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) {
var s []string
- j, err := c.GetWithContext(context.Background(), "instance/tags")
+ j, err := c.GetWithContext(ctx, "instance/tags")
if err != nil {
return nil, err
}
@@ -454,13 +690,27 @@ func (c *Client) InstanceTags() ([]string, error) {
}
// InstanceName returns the current VM's instance ID string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext].
func (c *Client) InstanceName() (string, error) {
- return c.getTrimmed(context.Background(), "instance/name")
+ return c.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
+//
+// Deprecated: Please use the context aware variant [Client.ZoneWithContext].
func (c *Client) Zone() (string, error) {
- zone, err := c.getTrimmed(context.Background(), "instance/zone")
+ return c.ZoneWithContext(context.Background())
+}
+
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
+ zone, err := c.getTrimmed(ctx, "instance/zone")
// zone is of the form "projects//zones/".
if err != nil {
return "", err
@@ -471,12 +721,34 @@ func (c *Client) Zone() (string, error) {
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext].
+func (c *Client) InstanceAttributes() ([]string, error) {
+ return c.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "instance/attributes/")
+}
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext].
+func (c *Client) ProjectAttributes() ([]string, error) {
+ return c.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "project/attributes/")
+}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
@@ -486,8 +758,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext].
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
+ return c.InstanceAttributeValueWithContext(context.Background(), attr)
+}
+
+// InstanceAttributeValueWithContext returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return c.GetWithContext(ctx, "instance/attributes/"+attr)
}
// ProjectAttributeValue returns the value of the provided
@@ -498,18 +784,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext].
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "project/attributes/"+attr)
+ return c.ProjectAttributeValueWithContext(context.Background(), attr)
+}
+
+// ProjectAttributeValueWithContext returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return c.GetWithContext(ctx, "project/attributes/"+attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
+//
+// Deprecated: Please use the context aware variant [Client.ScopesWithContext].
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
+ return c.ScopesWithContext(context.Background(), serviceAccount)
+}
+
+// ScopesWithContext returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
+ return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes")
}
// Subscribe subscribes to a value from the metadata service.
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
index bb412f891..2e53f0123 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -17,10 +17,15 @@
package metadata
-import "syscall"
+import (
+ "errors"
+ "syscall"
+)
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+ syscallRetryable = func(err error) bool {
+ return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
+ }
}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
new file mode 100644
index 000000000..d57ae1b27
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
@@ -0,0 +1,28 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !linux
+
+package metadata
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+//
+// NOTE: systemInfoSuggestsGCE is assigned to a varible for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ // We don't currently have checks for other GOOS
+ return false
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
new file mode 100644
index 000000000..17ba5a3a2
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package metadata
+
+import (
+ "os"
+ "strings"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a varible for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ b, _ := os.ReadFile("/sys/class/dmi/id/product_name")
+
+ name := strings.TrimSpace(string(b))
+ return name == "Google" || name == "Google Compute Engine"
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
new file mode 100644
index 000000000..f57a5b14e
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
@@ -0,0 +1,39 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package metadata
+
+import (
+ "strings"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a varible for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE)
+ if err != nil {
+ return false
+ }
+ defer k.Close()
+
+ s, _, err := k.GetStringValue("SystemProductName")
+ if err != nil {
+ return false
+ }
+ s = strings.TrimSpace(s)
+ return strings.HasPrefix(s, "Google")
+}
diff --git a/vendor/cloud.google.com/go/debug.md b/vendor/cloud.google.com/go/debug.md
new file mode 100644
index 000000000..052962e34
--- /dev/null
+++ b/vendor/cloud.google.com/go/debug.md
@@ -0,0 +1,271 @@
+# Logging, Debugging and Telemetry
+
+**Warning: The OpenCensus project is obsolete and was archived on July 31st,
+2023.** This means that any security vulnerabilities that are found will not be
+patched. We recommend that you migrate from OpenCensus tracing to
+OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for
+details.
+
+Logging, debugging and telemetry all capture data that can be used for
+troubleshooting. Logging records specific events and transactions. Debugging
+exposes values for immediate analysis. Telemetry is suitable for production use
+and can serve both logging and monitoring purposes. Telemetry tracing follows
+requests through a system to provide a view of component interactions. Telemetry
+metrics collects data for significant performance indicators, offering insights
+into a system's health.
+
+## Logging and debugging
+
+While working with the Go Client Libraries you may run into some situations
+where you need a deeper level of understanding about what is going on in order
+to solve your problem. Here are some tips and tricks that you can use in these
+cases. *Note* that many of the tips in this section will have a performance
+impact and are therefore not recommended for sustained production use. Use these
+tips locally or in production for a *limited time* to help get a better
+understanding of what is going on.
+
+### Request/Response Logging
+
+To enable logging for all outgoing requests from the Go Client Libraries, set
+the environment variable `GOOGLE_SDK_GO_LOGGING_LEVEL` to `debug`. Currently all
+logging is at the debug level, but this is likely to change in the future.
+
+*Caution*: Debug level logging should only be used in a limited manner. Debug
+level logs contain sensitive information, including headers, request/response
+payloads, and authentication tokens. Additionally, enabling logging at this
+level will have a minor performance impact.
+
+### HTTP based clients
+
+All of our auto-generated clients have a constructor to create a client that
+uses HTTP/JSON instead of gRPC. Additionally a couple of our hand-written
+clients like Storage and Bigquery are also HTTP based. Here are some tips for
+debugging these clients.
+
+#### Try setting Go's HTTP debug variable
+
+Try setting the following environment variable for verbose Go HTTP logging:
+GODEBUG=http2debug=1. To read more about this feature please see the godoc for
+[net/http](https://pkg.go.dev/net/http).
+
+*WARNING*: Enabling this debug variable will log headers and payloads which may
+contain private information.
+
+### gRPC based clients
+
+#### Try setting grpc-go's debug variables
+
+Try setting the following environment variables for grpc-go:
+`GRPC_GO_LOG_VERBOSITY_LEVEL=99` `GRPC_GO_LOG_SEVERITY_LEVEL=info`. These are
+good for diagnosing connection level failures. For more information please see
+[grpc-go's debug documentation](https://pkg.go.dev/google.golang.org/grpc/examples/features/debugging#section-readme).
+
+## Telemetry
+
+**Warning: The OpenCensus project is obsolete and was archived on July 31st,
+2023.** This means that any security vulnerabilities that are found will not be
+patched. We recommend that you migrate from OpenCensus tracing to
+OpenTelemetry, the successor project. The default experimental tracing support
+for OpenCensus is now deprecated in the Google Cloud client libraries for Go.
+See [OpenCensus](#opencensus) below for details.
+
+The Google Cloud client libraries for Go now use the
+[OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) project.
+The transition from OpenCensus to OpenTelemetry is covered in the following
+sections.
+
+### Tracing (experimental)
+
+Apart from spans created by underlying libraries such as gRPC, Google Cloud Go
+generated clients do not create spans. Only the spans created by following
+hand-written clients are in scope for the discussion in this section:
+
+* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery)
+* [cloud.google.com/go/bigtable](https://pkg.go.dev/cloud.google.com/go/bigtable)
+* [cloud.google.com/go/datastore](https://pkg.go.dev/cloud.google.com/go/datastore)
+* [cloud.google.com/go/firestore](https://pkg.go.dev/cloud.google.com/go/firestore)
+* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner)
+* [cloud.google.com/go/storage](https://pkg.go.dev/cloud.google.com/go/storage)
+
+Currently, the spans created by these clients are for OpenTelemetry. OpenCensus
+users are urged to transition to OpenTelemetry as soon as possible, as explained
+in the next section.
+
+#### OpenCensus
+
+**Warning: The OpenCensus project is obsolete and was archived on July 31st,
+2023.** This means that any security vulnerabilities that are found will not be
+patched. We recommend that you migrate from OpenCensus tracing to
+OpenTelemetry, the successor project. The default experimental tracing support
+for OpenCensus is now deprecated in the Google Cloud client libraries for Go.
+
+Using the [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus), you can immediately begin exporting your traces with OpenTelemetry, even while
+dependencies of your application remain instrumented with OpenCensus. If you do
+not use the bridge, you will need to migrate your entire application and all of
+its instrumented dependencies at once. For simple applications, this may be
+possible, but we expect the bridge to be helpful if multiple libraries with
+instrumentation are used.
+
+On May 29, 2024, six months after the
+[release](https://github.com/googleapis/google-cloud-go/releases/tag/v0.111.0)
+of experimental, opt-in support for OpenTelemetry tracing, the default tracing
+support in the clients above was changed from OpenCensus to OpenTelemetry, and
+the experimental OpenCensus support was marked as deprecated.
+
+On December 2nd, 2024, one year after the release of OpenTelemetry support, the
+experimental and deprecated support for OpenCensus tracing was removed.
+
+Please note that all Google Cloud Go clients currently provide experimental
+support for the propagation of both OpenCensus and OpenTelemetry trace context
+to their receiving endpoints. The experimental support for OpenCensus trace
+context propagation will be removed soon.
+
+Please refer to the following resources:
+
+* [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/)
+* [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus)
+
+#### OpenTelemetry
+
+The default experimental tracing support for OpenCensus is now deprecated in the
+Google Cloud client libraries for Go.
+
+On May 29, 2024, the default experimental tracing support in the Google Cloud
+client libraries for Go was changed from OpenCensus to OpenTelemetry.
+
+**Warning: OpenTelemetry-Go ensures
+[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility)
+with ONLY the current supported versions of the [Go
+language](https://go.dev/doc/devel/release#policy). This support may be narrower
+than the support that has been offered historically by the Go Client Libraries.
+Ensure that your Go runtime version is supported by the OpenTelemetry-Go
+[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility)
+policy before enabling OpenTelemetry instrumentation.**
+
+Please refer to the following resources:
+
+* [What is OpenTelemetry?](https://opentelemetry.io/docs/what-is-opentelemetry/)
+* [Cloud Trace - Go and OpenTelemetry](https://cloud.google.com/trace/docs/setup/go-ot)
+* On GCE, [use Ops Agent and OpenTelemetry](https://cloud.google.com/trace/docs/otlp)
+
+##### Configuring the OpenTelemetry-Go - OpenCensus Bridge
+
+To configure the OpenCensus bridge with OpenTelemetry and Cloud Trace:
+
+```go
+import (
+ "context"
+ "log"
+ "os"
+ texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
+ octrace "go.opencensus.io/trace"
+ "go.opentelemetry.io/contrib/detectors/gcp"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/bridge/opencensus"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+)
+
+func main() {
+ // Create exporter.
+ ctx := context.Background()
+ projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
+ exporter, err := texporter.New(texporter.WithProjectID(projectID))
+ if err != nil {
+ log.Fatalf("texporter.New: %v", err)
+ }
+ // Identify your application using resource detection
+ res, err := resource.New(ctx,
+ // Use the GCP resource detector to detect information about the GCP platform
+ resource.WithDetectors(gcp.NewDetector()),
+ // Keep the default detectors
+ resource.WithTelemetrySDK(),
+ // Add your own custom attributes to identify your application
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String("my-application"),
+ ),
+ )
+ if err != nil {
+ log.Fatalf("resource.New: %v", err)
+ }
+ // Create trace provider with the exporter.
+ //
+ // By default it uses AlwaysSample() which samples all traces.
+ // In a production environment or high QPS setup please use
+ // probabilistic sampling.
+ // Example:
+ // tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.0001)), ...)
+ tp := sdktrace.NewTracerProvider(
+ sdktrace.WithBatcher(exporter),
+ sdktrace.WithResource(res),
+ )
+ defer tp.Shutdown(ctx) // flushes any pending spans, and closes connections.
+ otel.SetTracerProvider(tp)
+ tracer := otel.GetTracerProvider().Tracer("example.com/trace")
+ // Configure the OpenCensus tracer to use the bridge.
+ octrace.DefaultTracer = opencensus.NewTracer(tracer)
+ // Use otel tracer to create spans...
+}
+
+```
+
+##### Configuring context propagation
+
+In order to pass options to OpenTelemetry trace context propagation, follow the
+appropriate example for the client's underlying transport.
+
+###### Passing options in HTTP-based clients
+
+```go
+ctx := context.Background()
+trans, err := htransport.NewTransport(ctx,
+ http.DefaultTransport,
+ option.WithScopes(storage.ScopeFullControl),
+)
+if err != nil {
+ log.Fatal(err)
+}
+// An example of passing options to the otelhttp.Transport.
+otelOpts := otelhttp.WithFilter(func(r *http.Request) bool {
+ return r.URL.Path != "/ping"
+})
+hc := &http.Client{
+ Transport: otelhttp.NewTransport(trans, otelOpts),
+}
+client, err := storage.NewClient(ctx, option.WithHTTPClient(hc))
+```
+
+Note that scopes must be set manually in this user-configured solution.
+
+###### Passing options in gRPC-based clients
+
+```go
+projectID := "..."
+ctx := context.Background()
+
+// An example of passing options to grpc.WithStatsHandler.
+otelOpts := otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents)
+dialOpts := grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelOpts))
+
+ctx := context.Background()
+c, err := datastore.NewClient(ctx, projectID, option.WithGRPCDialOption(dialOpts))
+if err != nil {
+ log.Fatal(err)
+}
+defer c.Close()
+```
+
+### Metrics (experimental)
+
+The generated clients do not create metrics. Only the following hand-written
+clients create experimental OpenCensus metrics:
+
+* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery)
+* [cloud.google.com/go/pubsub](https://pkg.go.dev/cloud.google.com/go/pubsub)
+* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner)
+
+#### OpenTelemetry
+
+The transition of the experimental metrics in the clients above from OpenCensus
+to OpenTelemetry is still TBD.
\ No newline at end of file
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
new file mode 100644
index 000000000..4c75de36f
--- /dev/null
+++ b/vendor/cloud.google.com/go/doc.go
@@ -0,0 +1,289 @@
+// Copyright 2014 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package cloud is the root of the packages used to access Google Cloud
+Services. See https://pkg.go.dev/cloud.google.com/go#section-directories for a
+full list of sub-modules.
+
+# Client Options
+
+All clients in sub-packages are configurable via client options. These options
+are described here: https://pkg.go.dev/google.golang.org/api/option.
+
+# Endpoint Override
+
+Endpoint configuration is used to specify the URL to which requests are
+sent. It is used for services that support or require regional endpoints, as
+well as for other use cases such as [testing against fake servers].
+
+For example, the Vertex AI service recommends that you configure the endpoint to
+the location with the features you want that is closest to your physical
+location or the location of your users. There is no global endpoint for Vertex
+AI. See [Vertex AI - Locations] for more details. The following example
+demonstrates configuring a Vertex AI client with a regional endpoint:
+
+ ctx := context.Background()
+ endpoint := "us-central1-aiplatform.googleapis.com:443"
+ client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint))
+
+# Authentication and Authorization
+
+All of the clients support authentication via [Google Application Default Credentials],
+or by providing a JSON key file for a Service Account. See examples below.
+
+Google Application Default Credentials (ADC) is the recommended way to authorize
+and authenticate clients. For information on how to create and obtain
+Application Default Credentials, see
+https://cloud.google.com/docs/authentication/production. If you have your
+environment configured correctly you will not need to pass any extra information
+to the client libraries. Here is an example of a client using ADC to
+authenticate:
+
+ client, err := secretmanager.NewClient(context.Background())
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+You can use a file with credentials to authenticate and authorize, such as a
+JSON key file associated with a Google service account. Service Account keys can
+be created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts.
+This example uses the Secret Manger client, but the same steps apply to the
+all other client libraries this package as well. Example:
+
+ client, err := secretmanager.NewClient(context.Background(),
+ option.WithCredentialsFile("/path/to/service-account-key.json"))
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+In some cases (for instance, you don't want to store secrets on disk), you can
+create credentials from in-memory JSON and use the WithCredentials option.
+This example uses the Secret Manager client, but the same steps apply to
+all other client libraries as well. Note that scopes can be
+found at https://developers.google.com/identity/protocols/oauth2/scopes, and
+are also provided in all auto-generated libraries: for example,
+cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
+
+ ctx := context.Background()
+ // https://pkg.go.dev/cloud.google.com/go/auth/credentials
+ creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+ Scopes: secretmanager.DefaultAuthScopes(),
+ CredentialsJSON: []byte("JSON creds")
+ }), secretmanager.DefaultAuthScopes()...)
+ if err != nil {
+ // TODO: handle error.
+ }
+ client, err := secretmanager.NewClient(ctx, option.WithAuthCredentials(creds))
+ if err != nil {
+ // TODO: handle error.
+ }
+ _ = client // Use the client.
+
+# Timeouts and Cancellation
+
+By default, non-streaming methods, like Create or Get, will have a default
+deadline applied to the context provided at call time, unless a context deadline
+is already set. Streaming methods have no default deadline and will run
+indefinitely. To set timeouts or arrange for cancellation, use
+[context]. Transient errors will be retried when correctness allows.
+
+Here is an example of setting a timeout for an RPC using
+[context.WithTimeout]:
+
+ ctx := context.Background()
+ // Do not set a timeout on the context passed to NewClient: dialing happens
+ // asynchronously, and the context is used to refresh credentials in the
+ // background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+ // Time out if it takes more than 10 seconds to create a dataset.
+ tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel() // Always call cancel.
+
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ if err := client.DeleteSecret(tctx, req); err != nil {
+ // TODO: handle error.
+ }
+
+Here is an example of setting a timeout for an RPC using
+[github.com/googleapis/gax-go/v2.WithTimeout]:
+
+ ctx := context.Background()
+ // Do not set a timeout on the context passed to NewClient: dialing happens
+ // asynchronously, and the context is used to refresh credentials in the
+ // background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
+ // Time out if it takes more than 10 seconds to create a dataset.
+ if err := client.DeleteSecret(tctx, req, gax.WithTimeout(10*time.Second)); err != nil {
+ // TODO: handle error.
+ }
+
+Here is an example of how to arrange for an RPC to be canceled, use
+[context.WithCancel]:
+
+ ctx := context.Background()
+ // Do not cancel the context passed to NewClient: dialing happens asynchronously,
+ // and the context is used to refresh credentials in the background.
+ client, err := secretmanager.NewClient(ctx)
+ if err != nil {
+ // TODO: handle error.
+ }
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel() // Always call cancel.
+
+ // TODO: Make the cancel function available to whatever might want to cancel the
+ // call--perhaps a GUI button.
+ req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
+ if err := client.DeleteSecret(cctx, req); err != nil {
+ // TODO: handle error.
+ }
+
+Do not attempt to control the initial connection (dialing) of a service by
+setting a timeout on the context passed to NewClient. Dialing is non-blocking,
+so timeouts would be ineffective and would only interfere with credential
+refreshing, which uses the same context.
+
+# Headers
+
+Regardless of which transport is used, request headers can be set in the same
+way using [`callctx.SetHeaders`][setheaders].
+
+Here is a generic example:
+
+ // Set the header "key" to "value".
+ ctx := callctx.SetHeaders(context.Background(), "key", "value")
+
+ // Then use ctx in a subsequent request.
+ response, err := client.GetSecret(ctx, request)
+
+# Google-reserved headers
+
+There are a some header keys that Google reserves for internal use that must
+not be ovewritten. The following header keys are broadly considered reserved
+and should not be conveyed by client library users unless instructed to do so:
+
+* `x-goog-api-client`
+* `x-goog-request-params`
+
+Be sure to check the individual package documentation for other service-specific
+reserved headers. For example, Storage supports a specific auditing header that
+is mentioned in that [module's documentation][storagedocs].
+
+# Google Cloud system parameters
+
+Google Cloud services respect [system parameters][system parameters] that can be
+used to augment request and/or response behavior. For the most part, they are
+not needed when using one of the enclosed client libraries. However, those that
+may be necessary are made available via the [`callctx`][callctx] package. If not
+present there, consider opening an issue on that repo to request a new constant.
+
+# Connection Pooling
+
+Connection pooling differs in clients based on their transport. Cloud
+clients either rely on HTTP or gRPC transports to communicate
+with Google Cloud.
+
+Cloud clients that use HTTP rely on the underlying HTTP transport to cache
+connections for later re-use. These are cached to the http.MaxIdleConns
+and http.MaxIdleConnsPerHost settings in http.DefaultTransport by default.
+
+For gRPC clients, connection pooling is configurable. Users of Cloud Client
+Libraries may specify [google.golang.org/api/option.WithGRPCConnectionPool]
+as a client option to NewClient calls. This configures the underlying gRPC
+connections to be pooled and accessed in a round robin fashion.
+
+# Using the Libraries in Container environments(Docker)
+
+Minimal container images like Alpine lack CA certificates. This causes RPCs to
+appear to hang, because gRPC retries indefinitely. See
+https://github.com/googleapis/google-cloud-go/issues/928 for more information.
+
+# Debugging
+
+For tips on how to write tests against code that calls into our libraries check
+out our [Debugging Guide].
+
+# Testing
+
+For tips on how to write tests against code that calls into our libraries check
+out our [Testing Guide].
+
+# Inspecting errors
+
+Most of the errors returned by the generated clients are wrapped in an
+[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped
+into a [google.golang.org/grpc/status.Status] or
+[google.golang.org/api/googleapi.Error] depending on the transport used to make
+the call (gRPC or REST). Converting your errors to these types can be a useful
+way to get more information about what went wrong while debugging.
+
+APIError gives access to specific details in the error. The transport-specific
+errors can still be unwrapped using the APIError.
+
+ if err != nil {
+ var ae *apierror.APIError
+ if errors.As(err, &ae) {
+ log.Println(ae.Reason())
+ log.Println(ae.Details().Help.GetLinks())
+ }
+ // If a gRPC transport was used you can extract the
+ // google.golang.org/grpc/status.Status from the error
+ s := ae.GRPCStatus()
+ log.Println(s.Code())
+ }
+
+# Client Stability
+
+Semver is used to communicate stability of the sub-modules of this package.
+Note, some stable sub-modules do contain packages, and sometimes features, that
+are considered unstable. If something is unstable it will be explicitly labeled
+as such. Example of package does in an unstable package:
+
+ NOTE: This package is in beta. It is not stable, and may be subject to changes.
+
+Clients that contain alpha and beta in their import path may change or go away
+without notice.
+
+Clients marked stable will maintain compatibility with future versions for as
+long as we can reasonably sustain. Incompatible changes might be made in some
+situations, including:
+
+ - Security bugs may prompt backwards-incompatible changes.
+ - Situations in which components are no longer feasible to maintain without
+ making breaking changes, including removal.
+ - Parts of the client surface may be outright unstable and subject to change.
+ These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
+ and subject to change or removal without notice."
+
+[testing against fake servers]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes
+[Vertex AI - Locations]: https://cloud.google.com/vertex-ai/docs/general/locations
+[Google Application Default Credentials]: https://cloud.google.com/docs/authentication/external/set-up-adc
+[Testing Guide]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md
+[Debugging Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md
+[callctx]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#pkg-constants
+[setheaders]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#SetHeaders
+[storagedocs]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Sending_Custom_Headers
+[system parameters]: https://cloud.google.com/apis/docs/system-parameters
+*/
+package cloud // import "cloud.google.com/go"
diff --git a/vendor/cloud.google.com/go/go.work b/vendor/cloud.google.com/go/go.work
new file mode 100644
index 000000000..5e0a8cad3
--- /dev/null
+++ b/vendor/cloud.google.com/go/go.work
@@ -0,0 +1,186 @@
+go 1.23.0
+
+use (
+ .
+ ./accessapproval
+ ./accesscontextmanager
+ ./advisorynotifications
+ ./ai
+ ./aiplatform
+ ./alloydb
+ ./analytics
+ ./apigateway
+ ./apigeeconnect
+ ./apigeeregistry
+ ./apihub
+ ./apikeys
+ ./appengine
+ ./apphub
+ ./apps
+ ./area120
+ ./artifactregistry
+ ./asset
+ ./assuredworkloads
+ ./auth
+ ./auth/oauth2adapt
+ ./automl
+ ./backupdr
+ ./baremetalsolution
+ ./batch
+ ./beyondcorp
+ ./bigquery
+ ./bigtable
+ ./billing
+ ./binaryauthorization
+ ./certificatemanager
+ ./channel
+ ./chat
+ ./cloudbuild
+ ./cloudcontrolspartner
+ ./clouddms
+ ./cloudprofiler
+ ./cloudquotas
+ ./cloudtasks
+ ./commerce
+ ./compute
+ ./compute/metadata
+ ./confidentialcomputing
+ ./config
+ ./contactcenterinsights
+ ./container
+ ./containeranalysis
+ ./datacatalog
+ ./dataflow
+ ./dataform
+ ./datafusion
+ ./datalabeling
+ ./dataplex
+ ./dataproc
+ ./dataqna
+ ./datastore
+ ./datastream
+ ./deploy
+ ./developerconnect
+ ./dialogflow
+ ./discoveryengine
+ ./dlp
+ ./documentai
+ ./domains
+ ./edgecontainer
+ ./edgenetwork
+ ./errorreporting
+ ./essentialcontacts
+ ./eventarc
+ ./filestore
+ ./financialservices
+ ./firestore
+ ./functions
+ ./gkebackup
+ ./gkeconnect
+ ./gkehub
+ ./gkemulticloud
+ ./grafeas
+ ./gsuiteaddons
+ ./iam
+ ./iap
+ ./identitytoolkit
+ ./ids
+ ./internal/actions
+ ./internal/aliasfix
+ ./internal/aliasgen
+ ./internal/carver
+ ./internal/examples/fake
+ ./internal/examples/mock
+ ./internal/gapicgen
+ ./internal/generated/snippets
+ ./internal/godocfx
+ ./internal/postprocessor
+ ./internal/protoveneer
+ ./iot
+ ./kms
+ ./language
+ ./lifesciences
+ ./logging
+ ./longrunning
+ ./managedidentities
+ ./managedkafka
+ ./maps
+ ./mediatranslation
+ ./memcache
+ ./memorystore
+ ./metastore
+ ./migrationcenter
+ ./modelarmor
+ ./monitoring
+ ./netapp
+ ./networkconnectivity
+ ./networkmanagement
+ ./networksecurity
+ ./networkservices
+ ./notebooks
+ ./optimization
+ ./oracledatabase
+ ./orchestration
+ ./orgpolicy
+ ./osconfig
+ ./oslogin
+ ./parallelstore
+ ./parametermanager
+ ./phishingprotection
+ ./policysimulator
+ ./policytroubleshooter
+ ./privatecatalog
+ ./privilegedaccessmanager
+ ./profiler
+ ./pubsub
+ ./pubsublite
+ ./rapidmigrationassessment
+ ./recaptchaenterprise
+ ./recommendationengine
+ ./recommender
+ ./redis
+ ./resourcemanager
+ ./retail
+ ./run
+ ./scheduler
+ ./secretmanager
+ ./securesourcemanager
+ ./security
+ ./securitycenter
+ ./securitycentermanagement
+ ./securityposture
+ ./servicecontrol
+ ./servicedirectory
+ ./servicehealth
+ ./servicemanagement
+ ./serviceusage
+ ./shell
+ ./shopping
+ ./spanner
+ ./spanner/test/opentelemetry/test
+ ./speech
+ ./storage
+ ./storage/internal/benchmarks
+ ./storageinsights
+ ./storagetransfer
+ ./streetview
+ ./support
+ ./talent
+ ./telcoautomation
+ ./texttospeech
+ ./tpu
+ ./trace
+ ./translate
+ ./vertexai
+ ./video
+ ./videointelligence
+ ./vision
+ ./visionai
+ ./vmmigration
+ ./vmwareengine
+ ./vpcaccess
+ ./webrisk
+ ./websecurityscanner
+ ./workflows
+ ./workstations
+)
diff --git a/vendor/cloud.google.com/go/go.work.sum b/vendor/cloud.google.com/go/go.work.sum
new file mode 100644
index 000000000..6546ee83f
--- /dev/null
+++ b/vendor/cloud.google.com/go/go.work.sum
@@ -0,0 +1,253 @@
+cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
+cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
+cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU=
+cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
+cloud.google.com/go/gaming v1.10.1 h1:5qZmZEWzMf8GEFgm9NeC3bjFRpt7x4S6U7oLbxaf7N8=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6 h1:K72hopUosKG3ntOPNG4OzzbuhxGuVf06fa2la1/H/Ho=
+git.sr.ht/~sbinet/gg v0.3.1 h1:LNhjNn8DerC8f9DHLz6lS0YYul/b602DUxDgGkd/Aik=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.23.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9 h1:7kQgkwGRoLzC9K0oyXdJo7nve/bynv/KwUsxbiTlzAM=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19 h1:iXUgAaqDcIUGbRoy2TdeofRG/j1zpGRSEmNK05T+bi8=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw=
+github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
+github.com/alecthomas/participle/v2 v2.1.0 h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4=
+github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
+github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
+github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
+github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc=
+github.com/apache/thrift v0.17.0 h1:cMd2aj52n+8VoAtvSvLn4kDC3aZ6IAkBuqWQ2IDu7wo=
+github.com/aws/aws-sdk-go-v2 v1.16.10 h1:+yDD0tcuHRQZgqONkpDwzepqmElQaSlFPymHRHR9mrc=
+github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo=
+github.com/aws/aws-sdk-go-v2/config v1.15.9 h1:TK5yNEnFDQ9iaO04gJS/3Y+eW8BioQiCUafW75/Wc3Q=
+github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.12 h1:iShu6VaWZZZfUZvlGtRjl+g1lWk44g1QmiCTD4KS0jI=
+github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 h1:zZHPdM2x09/0F8D7XyVvQnP2/jaW7bEMmtcSCPYq/iI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 h1:U8DZvyFFesBmK62dYC6BRXm4Cd/wPP3aPcecu3xv/F4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 h1:GMp98usVW5tzQhxd26KWhoNQPlR2noIlfbzqjVGBhLU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 h1:/spg6h3tG4pefphbvhpgdMtFMegSajPPSEJd1t8lnpc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3 h1:PK6c4wYv3wbb88eH0X0FjJwRykEoJwAesuslNReY7iE=
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3/go.mod h1:BrAJyOMrnwzYVQcP5ziqlCpnEuFfkNppZLzqDyW/YTg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 h1:GkYtp4gi4wdWUV+pPetjk5y2aDxbr0t8n5OjVBwZdII=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 h1:HaIE5/TtKr66qZTJpvMifDxH4lRt2JZawbkLYOo1F+Y=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 h1:YU9UHPukkCCnETHEExOptF/BxPvGJKXO/NBx+RMQ/2A=
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4=
+github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag=
+github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/bazelbuild/rules_go v0.49.0 h1:5vCbuvy8Q11g41lseGJDc5vxhDjJtfxr6nM/IC4VmqM=
+github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs=
+github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
+github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
+github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
+github.com/chzyer/logex v1.2.0 h1:+eqR0HfOetur4tgnC8ftU5imRnhi4te+BadWS95c5AM=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v0.0.0-20210722231415-061457976a23 h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
+github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
+github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
+github.com/fullstorydev/grpcurl v1.8.7 h1:xJWosq3BQovQ4QrdPO72OrPiWuGgEsxY8ldYsJbPrqI=
+github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ=
+github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk=
+github.com/go-fonts/liberation v0.2.0 h1:jAkAWJP4S+OsrPLZM4/eC9iW7CtHy+HBXrEwZXWo5VM=
+github.com/go-fonts/stix v0.1.0 h1:UlZlgrvvmT/58o573ot7NFw0vZasZ5I6bcIft/oMdgg=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81 h1:6zl3BbBhdnMkpSj2YY30qV3gDcVBGtFgVsV3+/i+mKQ=
+github.com/go-pdf/fpdf v0.6.0 h1:MlgtGIfsdMEEQJr2le6b/HNr1ZlQwxyWr77r2aj2U/8=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g=
+github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA=
+github.com/google/go-pkcs11 v0.3.0 h1:PVRnTgtArZ3QQqTGtbtjtnIkzl2iY2kt24yqbrf7td8=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
+github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/hamba/avro/v2 v2.17.2 h1:6PKpEWzJfNnvBgn7m2/8WYaDOUASxfDU+Jyb4ojDgFY=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0 h1:mjZV3MTu2A5gwfT5G9IIiLGdwZNciyVq5qqnmJJZ2JI=
+github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0/go.mod h1:pMYMxVaKJqCDC1JUg/XbPJ4/fSazB25zORpFzqsIGIc=
+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
+github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE=
+github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/itchyny/gojq v0.12.9 h1:biKpbKwMxVYhCU1d6mR7qMr3f0Hn9F5k5YykCVb3gmM=
+github.com/itchyny/gojq v0.12.9/go.mod h1:T4Ip7AETUXeGpD+436m+UEl3m3tokRgajd5pRfsR5oE=
+github.com/itchyny/timefmt-go v0.1.4 h1:hFEfWVdwsEi+CY8xY2FtgWHGQaBaC3JeHd+cve0ynVM=
+github.com/itchyny/timefmt-go v0.1.4/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
+github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
+github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lyft/protoc-gen-star v0.6.1 h1:erE0rdztuaDq3bpGifD95wfoPrSZc95nGA6tbiNYh6M=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4 h1:sIXJOMrYnQZJu7OB7ANSF4MYri2fTEGIsRLz6LwI4xE=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/miekg/dns v1.1.33 h1:8KUVEKrUw2dmu1Ys0aWnkEJgoRaLAzNysfCh2KSMWiI=
+github.com/miekg/dns v1.1.33/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mmcloughlin/avo v0.5.0 h1:nAco9/aI9Lg2kiuROBY6BhCI/z0t5jEvJfjWbL8qXLU=
+github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ=
+github.com/phpdave11/gofpdi v1.0.13 h1:o61duiW8M9sMlkVXWlvP92sZJtGKENvW3VExs6dZukQ=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 h1:K1Xf3bKttbF+koVGaX5xngRIZ5bVjbmPnaxE/dR08uY=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=
+github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/substrait-io/substrait-go v0.4.2 h1:buDnjsb3qAqTaNbOR7VKmNgXf4lYQxWEcnSGUWBtmN8=
+github.com/tidwall/gjson v1.14.2 h1:6BBkirS0rAHjumnjHF6qgy5d2YAJ1TLIaFE2lzfOLqo=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+go.opentelemetry.io/contrib/detectors/gcp v1.27.0/go.mod h1:amd+4uZxqJAUx7zI1JvygUtAc2EVWtQeyz8D+3161SQ=
+go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA=
+go.opentelemetry.io/otel/bridge/opencensus v0.40.0 h1:pqDiayRhBgoqy1vwnscik+TizcImJ58l053NScJyZso=
+go.opentelemetry.io/otel/bridge/opencensus v0.40.0/go.mod h1:1NvVHb6tLTe5A9qCYz+eErW0t8iPn4ZfR6tDKcqlGTM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU=
+go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw=
+go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867 h1:TcHcE0vrmgzNH1v3ppjcMGbhG5+9fMuvOmUYwNEF4q4=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
+golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
+golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
+golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
+golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
+gonum.org/v1/plot v0.10.1 h1:dnifSs43YJuNMDzB7v8wV64O4ABBHReuAVAoBxqBqS4=
+google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/genproto v0.0.0-20230725213213-b022f6e96895/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto/googleapis/api v0.0.0-20230725213213-b022f6e96895/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
+google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE=
+google.golang.org/genproto/googleapis/api v0.0.0-20250227231956-55c901821b1e h1:nsxey/MfoGzYNduN0NN/+hqP9iiCIYsrVbXb/8hjFM8=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240102182953-50ed04b92917/go.mod h1:O9TvT7A9NLgdqqF0JJXJ+axpaoYiEb8txGmkvy+AvLc=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240513163218-0867130af1f8/go.mod h1:RCpt0+3mpEDPldc32vXBM8ADXlFL95T8Chxx0nv0/zE=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20250115164207-1a7da9e5054f h1:NtrhicUU5+S4TaE5AurusJUYfAo/QB8a+kbIXipuJeI=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:35wIojE/F1ptq1nfNDNjtowabHoMSA2qQs7+smpCO5s=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
+lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
+modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
+modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
+modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
+modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
+modernc.org/libc v1.22.4 h1:wymSbZb0AlrjdAVX3cjreCHTPCpPARbQXNz6BHPzdwQ=
+modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
+modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/sqlite v1.21.2 h1:ixuUG0QS413Vfzyx6FWx6PYTmHaOegTY+hjzhn7L+a0=
+modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
+modernc.org/tcl v1.15.1 h1:mOQwiEK4p7HruMZcwKTZPw/aqtGM4aY00uzWhlKKYws=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
+rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index c4cacb03f..e2c753f54 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,119 @@
# Changes
+## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13)
+
+
+### Bug Fixes
+
+* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06)
+
+
+### Bug Fixes
+
+* **iam:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec))
+
+## [1.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.1...iam/v1.4.0) (2025-02-12)
+
+
+### Features
+
+* **iam/admin:** Regenerate client ([#11570](https://github.com/googleapis/google-cloud-go/issues/11570)) ([eab87d7](https://github.com/googleapis/google-cloud-go/commit/eab87d73bea884c636ec88f03b9aa90102a2833f)), refs [#8219](https://github.com/googleapis/google-cloud-go/issues/8219)
+
+## [1.3.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.0...iam/v1.3.1) (2025-01-02)
+
+
+### Bug Fixes
+
+* **iam:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [1.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.2...iam/v1.3.0) (2024-12-04)
+
+
+### Features
+
+* **iam:** Add ResourcePolicyMember to google/iam/v1 ([8dedb87](https://github.com/googleapis/google-cloud-go/commit/8dedb878c070cc1e92d62bb9b32358425e3ceffb))
+
+## [1.2.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.1...iam/v1.2.2) (2024-10-23)
+
+
+### Bug Fixes
+
+* **iam:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **iam:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+
+## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+
+## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20)
+
+
+### Features
+
+* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+
+## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08)
+
+
+### Bug Fixes
+
+* **iam:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24)
+
+
+### Bug Fixes
+
+* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10)
+
+
+### Bug Fixes
+
+* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01)
+
+
+### Bug Fixes
+
+* **iam:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+
+## [1.1.9](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.8...iam/v1.1.9) (2024-06-26)
+
+
+### Bug Fixes
+
+* **iam:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568))
+
+## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01)
+
+
+### Bug Fixes
+
+* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
+## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14)
+
+
+### Bug Fixes
+
+* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30)
+
+
+### Bug Fixes
+
+* **iam:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
+
## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01)
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 85346a891..2b57ae3b8 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/iam/v1/iam_policy.proto
package iampb
@@ -65,11 +65,9 @@ type SetIamPolicyRequest struct {
func (x *SetIamPolicyRequest) Reset() {
*x = SetIamPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SetIamPolicyRequest) String() string {
@@ -80,7 +78,7 @@ func (*SetIamPolicyRequest) ProtoMessage() {}
func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -132,11 +130,9 @@ type GetIamPolicyRequest struct {
func (x *GetIamPolicyRequest) Reset() {
*x = GetIamPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetIamPolicyRequest) String() string {
@@ -147,7 +143,7 @@ func (*GetIamPolicyRequest) ProtoMessage() {}
func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -194,11 +190,9 @@ type TestIamPermissionsRequest struct {
func (x *TestIamPermissionsRequest) Reset() {
*x = TestIamPermissionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TestIamPermissionsRequest) String() string {
@@ -209,7 +203,7 @@ func (*TestIamPermissionsRequest) ProtoMessage() {}
func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -251,11 +245,9 @@ type TestIamPermissionsResponse struct {
func (x *TestIamPermissionsResponse) Reset() {
*x = TestIamPermissionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TestIamPermissionsResponse) String() string {
@@ -266,7 +258,7 @@ func (*TestIamPermissionsResponse) ProtoMessage() {}
func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -363,16 +355,15 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{
0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72,
0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d,
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49,
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
- 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d,
- 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31,
+ 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c,
+ 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -388,7 +379,7 @@ func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte {
}
var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{
+var file_google_iam_v1_iam_policy_proto_goTypes = []any{
(*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest
(*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest
(*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest
@@ -421,56 +412,6 @@ func file_google_iam_v1_iam_policy_proto_init() {
}
file_google_iam_v1_options_proto_init()
file_google_iam_v1_policy_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetIamPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetIamPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TestIamPermissionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TestIamPermissionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index 68f8d761f..745de05ba 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/iam/v1/options.proto
package iampb
@@ -64,11 +64,9 @@ type GetPolicyOptions struct {
func (x *GetPolicyOptions) Reset() {
*x = GetPolicyOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_options_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_options_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetPolicyOptions) String() string {
@@ -79,7 +77,7 @@ func (*GetPolicyOptions) ProtoMessage() {}
func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_options_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -136,7 +134,7 @@ func file_google_iam_v1_options_proto_rawDescGZIP() []byte {
}
var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_iam_v1_options_proto_goTypes = []interface{}{
+var file_google_iam_v1_options_proto_goTypes = []any{
(*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions
}
var file_google_iam_v1_options_proto_depIdxs = []int32{
@@ -152,20 +150,6 @@ func file_google_iam_v1_options_proto_init() {
if File_google_iam_v1_options_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetPolicyOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index eefd1d0e5..0eba15089 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/iam/v1/policy.proto
package iampb
@@ -289,11 +289,11 @@ type Policy struct {
// Any operation that affects conditional role bindings must specify version
// `3`. This requirement applies to the following operations:
//
- // * Getting a policy that includes a conditional role binding
- // * Adding a conditional role binding to a policy
- // * Changing a conditional role binding in a policy
- // * Removing any role binding, with or without a condition, from a policy
- // that includes conditions
+ // - Getting a policy that includes a conditional role binding
+ // - Adding a conditional role binding to a policy
+ // - Changing a conditional role binding in a policy
+ // - Removing any role binding, with or without a condition, from a policy
+ // that includes conditions
//
// **Important:** If you use IAM Conditions, you must include the `etag` field
// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
@@ -337,11 +337,9 @@ type Policy struct {
func (x *Policy) Reset() {
*x = Policy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Policy) String() string {
@@ -352,7 +350,7 @@ func (*Policy) ProtoMessage() {}
func (x *Policy) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -407,47 +405,43 @@ type Binding struct {
// Specifies the principals requesting access for a Google Cloud resource.
// `members` can have the following values:
//
- // * `allUsers`: A special identifier that represents anyone who is
- // on the internet; with or without a Google account.
- //
- // * `allAuthenticatedUsers`: A special identifier that represents anyone
- // who is authenticated with a Google account or a service account.
+ // - `allUsers`: A special identifier that represents anyone who is
+ // on the internet; with or without a Google account.
//
- // * `user:{emailid}`: An email address that represents a specific Google
- // account. For example, `alice@example.com` .
+ // - `allAuthenticatedUsers`: A special identifier that represents anyone
+ // who is authenticated with a Google account or a service account.
//
+ // - `user:{emailid}`: An email address that represents a specific Google
+ // account. For example, `alice@example.com` .
//
- // * `serviceAccount:{emailid}`: An email address that represents a service
- // account. For example, `my-other-app@appspot.gserviceaccount.com`.
+ // - `serviceAccount:{emailid}`: An email address that represents a service
+ // account. For example, `my-other-app@appspot.gserviceaccount.com`.
//
- // * `group:{emailid}`: An email address that represents a Google group.
- // For example, `admins@example.com`.
+ // - `group:{emailid}`: An email address that represents a Google group.
+ // For example, `admins@example.com`.
//
- // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a user that has been recently deleted. For
- // example, `alice@example.com?uid=123456789012345678901`. If the user is
- // recovered, this value reverts to `user:{emailid}` and the recovered user
- // retains the role in the binding.
+ // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a user that has been recently deleted. For
+ // example, `alice@example.com?uid=123456789012345678901`. If the user is
+ // recovered, this value reverts to `user:{emailid}` and the recovered user
+ // retains the role in the binding.
//
- // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
- // unique identifier) representing a service account that has been recently
- // deleted. For example,
- // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
- // If the service account is undeleted, this value reverts to
- // `serviceAccount:{emailid}` and the undeleted service account retains the
- // role in the binding.
- //
- // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a Google group that has been recently
- // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
- // the group is recovered, this value reverts to `group:{emailid}` and the
- // recovered group retains the role in the binding.
- //
- //
- // * `domain:{domain}`: The G Suite domain (primary) that represents all the
- // users of that domain. For example, `google.com` or `example.com`.
+ // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+ // unique identifier) representing a service account that has been recently
+ // deleted. For example,
+ // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+ // If the service account is undeleted, this value reverts to
+ // `serviceAccount:{emailid}` and the undeleted service account retains the
+ // role in the binding.
//
+ // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a Google group that has been recently
+ // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+ // the group is recovered, this value reverts to `group:{emailid}` and the
+ // recovered group retains the role in the binding.
//
+ // - `domain:{domain}`: The G Suite domain (primary) that represents all the
+ // users of that domain. For example, `google.com` or `example.com`.
Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
// The condition that is associated with this binding.
//
@@ -466,11 +460,9 @@ type Binding struct {
func (x *Binding) Reset() {
*x = Binding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Binding) String() string {
@@ -481,7 +473,7 @@ func (*Binding) ProtoMessage() {}
func (x *Binding) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -583,11 +575,9 @@ type AuditConfig struct {
func (x *AuditConfig) Reset() {
*x = AuditConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditConfig) String() string {
@@ -598,7 +588,7 @@ func (*AuditConfig) ProtoMessage() {}
func (x *AuditConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -662,11 +652,9 @@ type AuditLogConfig struct {
func (x *AuditLogConfig) Reset() {
*x = AuditLogConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditLogConfig) String() string {
@@ -677,7 +665,7 @@ func (*AuditLogConfig) ProtoMessage() {}
func (x *AuditLogConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -720,11 +708,9 @@ type PolicyDelta struct {
func (x *PolicyDelta) Reset() {
*x = PolicyDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PolicyDelta) String() string {
@@ -735,7 +721,7 @@ func (*PolicyDelta) ProtoMessage() {}
func (x *PolicyDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -788,11 +774,9 @@ type BindingDelta struct {
func (x *BindingDelta) Reset() {
*x = BindingDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BindingDelta) String() string {
@@ -803,7 +787,7 @@ func (*BindingDelta) ProtoMessage() {}
func (x *BindingDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -873,11 +857,9 @@ type AuditConfigDelta struct {
func (x *AuditConfigDelta) Reset() {
*x = AuditConfigDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditConfigDelta) String() string {
@@ -888,7 +870,7 @@ func (*AuditConfigDelta) ProtoMessage() {}
func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1040,7 +1022,7 @@ func file_google_iam_v1_policy_proto_rawDescGZIP() []byte {
var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
-var file_google_iam_v1_policy_proto_goTypes = []interface{}{
+var file_google_iam_v1_policy_proto_goTypes = []any{
(AuditLogConfig_LogType)(0), // 0: google.iam.v1.AuditLogConfig.LogType
(BindingDelta_Action)(0), // 1: google.iam.v1.BindingDelta.Action
(AuditConfigDelta_Action)(0), // 2: google.iam.v1.AuditConfigDelta.Action
@@ -1076,92 +1058,6 @@ func file_google_iam_v1_policy_proto_init() {
if File_google_iam_v1_policy_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Policy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Binding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuditConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuditLogConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PolicyDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BindingDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuditConfigDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go
new file mode 100644
index 000000000..c3339e26c
--- /dev/null
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go
@@ -0,0 +1,185 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/iam/v1/resource_policy_member.proto
+
+package iampb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Output-only policy member strings of a Google Cloud resource's built-in
+// identity.
+type ResourcePolicyMember struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IAM policy binding member referring to a Google Cloud resource by
+ // user-assigned name (https://google.aip.dev/122). If a resource is deleted
+ // and recreated with the same name, the binding will be applicable to the new
+ // resource.
+ //
+ // Example:
+ // `principal://parametermanager.googleapis.com/projects/12345/name/locations/us-central1-a/parameters/my-parameter`
+ IamPolicyNamePrincipal string `protobuf:"bytes,1,opt,name=iam_policy_name_principal,json=iamPolicyNamePrincipal,proto3" json:"iam_policy_name_principal,omitempty"`
+ // IAM policy binding member referring to a Google Cloud resource by
+ // system-assigned unique identifier (https://google.aip.dev/148#uid). If a
+ // resource is deleted and recreated with the same name, the binding will not
+ // be applicable to the new resource
+ //
+ // Example:
+ // `principal://parametermanager.googleapis.com/projects/12345/uid/locations/us-central1-a/parameters/a918fed5`
+ IamPolicyUidPrincipal string `protobuf:"bytes,2,opt,name=iam_policy_uid_principal,json=iamPolicyUidPrincipal,proto3" json:"iam_policy_uid_principal,omitempty"`
+}
+
+func (x *ResourcePolicyMember) Reset() {
+ *x = ResourcePolicyMember{}
+ mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourcePolicyMember) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourcePolicyMember) ProtoMessage() {}
+
+func (x *ResourcePolicyMember) ProtoReflect() protoreflect.Message {
+ mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourcePolicyMember.ProtoReflect.Descriptor instead.
+func (*ResourcePolicyMember) Descriptor() ([]byte, []int) {
+ return file_google_iam_v1_resource_policy_member_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourcePolicyMember) GetIamPolicyNamePrincipal() string {
+ if x != nil {
+ return x.IamPolicyNamePrincipal
+ }
+ return ""
+}
+
+func (x *ResourcePolicyMember) GetIamPolicyUidPrincipal() string {
+ if x != nil {
+ return x.IamPolicyUidPrincipal
+ }
+ return ""
+}
+
+var File_google_iam_v1_resource_policy_member_proto protoreflect.FileDescriptor
+
+var file_google_iam_v1_resource_policy_member_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f,
+ 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65,
+ 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a,
+ 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d,
+ 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x19, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70,
+ 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x69,
+ 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x69, 0x6e,
+ 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x18, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x5f, 0x75, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x15, 0x69, 0x61,
+ 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69,
+ 0x70, 0x61, 0x6c, 0x42, 0x87, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70,
+ 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_iam_v1_resource_policy_member_proto_rawDescOnce sync.Once
+ file_google_iam_v1_resource_policy_member_proto_rawDescData = file_google_iam_v1_resource_policy_member_proto_rawDesc
+)
+
+func file_google_iam_v1_resource_policy_member_proto_rawDescGZIP() []byte {
+ file_google_iam_v1_resource_policy_member_proto_rawDescOnce.Do(func() {
+ file_google_iam_v1_resource_policy_member_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_resource_policy_member_proto_rawDescData)
+ })
+ return file_google_iam_v1_resource_policy_member_proto_rawDescData
+}
+
+var file_google_iam_v1_resource_policy_member_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_iam_v1_resource_policy_member_proto_goTypes = []any{
+ (*ResourcePolicyMember)(nil), // 0: google.iam.v1.ResourcePolicyMember
+}
+var file_google_iam_v1_resource_policy_member_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_iam_v1_resource_policy_member_proto_init() }
+func file_google_iam_v1_resource_policy_member_proto_init() {
+ if File_google_iam_v1_resource_policy_member_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_iam_v1_resource_policy_member_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_iam_v1_resource_policy_member_proto_goTypes,
+ DependencyIndexes: file_google_iam_v1_resource_policy_member_proto_depIdxs,
+ MessageInfos: file_google_iam_v1_resource_policy_member_proto_msgTypes,
+ }.Build()
+ File_google_iam_v1_resource_policy_member_proto = out.File
+ file_google_iam_v1_resource_policy_member_proto_rawDesc = nil
+ file_google_iam_v1_resource_policy_member_proto_goTypes = nil
+ file_google_iam_v1_resource_policy_member_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index ae8a1fc14..d72e82329 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -39,6 +39,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/ai/generativelanguage/apiv1alpha": {
+ "api_shortname": "generativelanguage",
+ "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1alpha",
+ "description": "Generative Language API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/ai/generativelanguage/apiv1beta": {
"api_shortname": "generativelanguage",
"distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta",
@@ -179,6 +189,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/apihub/apiv1": {
+ "api_shortname": "apihub",
+ "distribution_name": "cloud.google.com/go/apihub/apiv1",
+ "description": "API hub API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apihub/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/apikeys/apiv2": {
"api_shortname": "apikeys",
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
@@ -199,6 +219,36 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/apphub/apiv1": {
+ "api_shortname": "apphub",
+ "distribution_name": "cloud.google.com/go/apphub/apiv1",
+ "description": "App Hub API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apphub/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/apps/events/subscriptions/apiv1": {
+ "api_shortname": "workspaceevents",
+ "distribution_name": "cloud.google.com/go/apps/events/subscriptions/apiv1",
+ "description": "Google Workspace Events API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/events/subscriptions/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/apps/meet/apiv2": {
+ "api_shortname": "meet",
+ "distribution_name": "cloud.google.com/go/apps/meet/apiv2",
+ "description": "Google Meet API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/apps/meet/apiv2beta": {
"api_shortname": "meet",
"distribution_name": "cloud.google.com/go/apps/meet/apiv2beta",
@@ -309,6 +359,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/backupdr/apiv1": {
+ "api_shortname": "backupdr",
+ "distribution_name": "cloud.google.com/go/backupdr/apiv1",
+ "description": "Backup and DR Service API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/baremetalsolution/apiv2": {
"api_shortname": "baremetalsolution",
"distribution_name": "cloud.google.com/go/baremetalsolution/apiv2",
@@ -486,7 +546,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/migration/apiv2alpha": {
@@ -519,6 +579,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/bigquery/storage/apiv1alpha": {
+ "api_shortname": "bigquerystorage",
+ "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha",
+ "description": "BigQuery Storage API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/bigquery/storage/apiv1beta1": {
"api_shortname": "bigquerystorage",
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1",
@@ -549,6 +619,26 @@
"release_level": "stable",
"library_type": "GAPIC_MANUAL"
},
+ "cloud.google.com/go/bigtable/admin/apiv2": {
+ "api_shortname": "bigtableadmin",
+ "distribution_name": "cloud.google.com/go/bigtable/admin/apiv2",
+ "description": "Cloud Bigtable Admin API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/admin/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/bigtable/apiv2": {
+ "api_shortname": "bigtable",
+ "distribution_name": "cloud.google.com/go/bigtable/apiv2",
+ "description": "Cloud Bigtable API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/billing/apiv1": {
"api_shortname": "cloudbilling",
"distribution_name": "cloud.google.com/go/billing/apiv1",
@@ -619,6 +709,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/chat/apiv1": {
+ "api_shortname": "chat",
+ "distribution_name": "cloud.google.com/go/chat/apiv1",
+ "description": "Google Chat API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/chat/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/cloudbuild/apiv1/v2": {
"api_shortname": "cloudbuild",
"distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2",
@@ -639,6 +739,26 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/cloudcontrolspartner/apiv1": {
+ "api_shortname": "cloudcontrolspartner",
+ "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1",
+ "description": "Cloud Controls Partner API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/cloudcontrolspartner/apiv1beta": {
+ "api_shortname": "cloudcontrolspartner",
+ "distribution_name": "cloud.google.com/go/cloudcontrolspartner/apiv1beta",
+ "description": "Cloud Controls Partner API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/clouddms/apiv1": {
"api_shortname": "datamigration",
"distribution_name": "cloud.google.com/go/clouddms/apiv1",
@@ -666,6 +786,16 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/cloudquotas/apiv1beta": {
+ "api_shortname": "cloudquotas",
+ "distribution_name": "cloud.google.com/go/cloudquotas/apiv1beta",
+ "description": "Cloud Quotas API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1beta",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
@@ -706,7 +836,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/compute/apiv1": {
@@ -756,7 +886,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/contactcenterinsights/apiv1": {
@@ -829,16 +959,6 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/dataform/apiv1alpha2": {
- "api_shortname": "dataform",
- "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2",
- "description": "Dataform API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/dataform/apiv1beta1": {
"api_shortname": "dataform",
"distribution_name": "cloud.google.com/go/dataform/apiv1beta1",
@@ -919,6 +1039,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/datastore/apiv1": {
+ "api_shortname": "datastore",
+ "distribution_name": "cloud.google.com/go/datastore/apiv1",
+ "description": "Cloud Datastore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/datastream/apiv1": {
"api_shortname": "datastream",
"distribution_name": "cloud.google.com/go/datastream/apiv1",
@@ -959,6 +1089,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/developerconnect/apiv1": {
+ "api_shortname": "developerconnect",
+ "distribution_name": "cloud.google.com/go/developerconnect/apiv1",
+ "description": "Developer Connect API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/dialogflow/apiv2": {
"api_shortname": "dialogflow",
"distribution_name": "cloud.google.com/go/dialogflow/apiv2",
@@ -1032,7 +1172,7 @@
"cloud.google.com/go/dlp/apiv2": {
"api_shortname": "dlp",
"distribution_name": "cloud.google.com/go/dlp/apiv2",
- "description": "Cloud Data Loss Prevention (DLP)",
+ "description": "Sensitive Data Protection (DLP)",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2",
@@ -1086,7 +1226,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/errorreporting": {
@@ -1149,6 +1289,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/financialservices/apiv1": {
+ "api_shortname": "financialservices",
+ "distribution_name": "cloud.google.com/go/financialservices/apiv1",
+ "description": "Financial Services API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/financialservices/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/firestore": {
"api_shortname": "firestore",
"distribution_name": "cloud.google.com/go/firestore",
@@ -1229,6 +1379,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/gkeconnect/gateway/apiv1": {
+ "api_shortname": "connectgateway",
+ "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1",
+ "description": "Connect Gateway API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/gkeconnect/gateway/apiv1beta1": {
"api_shortname": "connectgateway",
"distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1",
@@ -1252,7 +1412,7 @@
"cloud.google.com/go/gkemulticloud/apiv1": {
"api_shortname": "gkemulticloud",
"distribution_name": "cloud.google.com/go/gkemulticloud/apiv1",
- "description": "Anthos Multi-Cloud API",
+ "description": "GKE Multi-Cloud API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1",
@@ -1262,7 +1422,7 @@
"cloud.google.com/go/gsuiteaddons/apiv1": {
"api_shortname": "gsuiteaddons",
"distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1",
- "description": "Google Workspace Add-ons API",
+ "description": "Google Workspace add-ons API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1",
@@ -1319,6 +1479,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/identitytoolkit/apiv2": {
+ "api_shortname": "identitytoolkit",
+ "distribution_name": "cloud.google.com/go/identitytoolkit/apiv2",
+ "description": "Identity Toolkit API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/identitytoolkit/latest/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/ids/apiv1": {
"api_shortname": "ids",
"distribution_name": "cloud.google.com/go/ids/apiv1",
@@ -1439,6 +1609,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/managedkafka/apiv1": {
+ "api_shortname": "managedkafka",
+ "distribution_name": "cloud.google.com/go/managedkafka/apiv1",
+ "description": "Managed Service for Apache Kafka API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/maps/addressvalidation/apiv1": {
"api_shortname": "addressvalidation",
"distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1",
@@ -1449,6 +1629,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/maps/areainsights/apiv1": {
+ "api_shortname": "areainsights",
+ "distribution_name": "cloud.google.com/go/maps/areainsights/apiv1",
+ "description": "Places Insights API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/areainsights/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/maps/fleetengine/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1",
@@ -1469,16 +1659,6 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": {
- "api_shortname": "mapsplatformdatasets",
- "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha",
- "description": "Maps Platform Datasets API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/maps/places/apiv1": {
"api_shortname": "places",
"distribution_name": "cloud.google.com/go/maps/places/apiv1",
@@ -1486,7 +1666,17 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1",
- "release_level": "stable",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/maps/routeoptimization/apiv1": {
+ "api_shortname": "routeoptimization",
+ "distribution_name": "cloud.google.com/go/maps/routeoptimization/apiv1",
+ "description": "Route Optimization API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routeoptimization/apiv1",
+ "release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/maps/routing/apiv2": {
@@ -1499,6 +1689,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/maps/solar/apiv1": {
+ "api_shortname": "solar",
+ "distribution_name": "cloud.google.com/go/maps/solar/apiv1",
+ "description": "Solar API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/mediatranslation/apiv1beta1": {
"api_shortname": "mediatranslation",
"distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1",
@@ -1529,6 +1729,26 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/memorystore/apiv1": {
+ "api_shortname": "memorystore",
+ "distribution_name": "cloud.google.com/go/memorystore/apiv1",
+ "description": "Memorystore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memorystore/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/memorystore/apiv1beta": {
+ "api_shortname": "memorystore",
+ "distribution_name": "cloud.google.com/go/memorystore/apiv1beta",
+ "description": "Memorystore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memorystore/latest/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/metastore/apiv1": {
"api_shortname": "metastore",
"distribution_name": "cloud.google.com/go/metastore/apiv1",
@@ -1566,6 +1786,16 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/modelarmor/apiv1": {
+ "api_shortname": "modelarmor",
+ "distribution_name": "cloud.google.com/go/modelarmor/apiv1",
+ "description": "Model Armor API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/modelarmor/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
@@ -1576,7 +1806,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2",
- "release_level": "stable",
+ "release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/monitoring/dashboard/apiv1": {
@@ -1606,7 +1836,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/networkconnectivity/apiv1": {
@@ -1649,6 +1879,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/networkservices/apiv1": {
+ "api_shortname": "networkservices",
+ "distribution_name": "cloud.google.com/go/networkservices/apiv1",
+ "description": "Network Services API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/notebooks/apiv1": {
"api_shortname": "notebooks",
"distribution_name": "cloud.google.com/go/notebooks/apiv1",
@@ -1689,6 +1929,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/oracledatabase/apiv1": {
+ "api_shortname": "oracledatabase",
+ "distribution_name": "cloud.google.com/go/oracledatabase/apiv1",
+ "description": "Oracle Database@Google Cloud API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oracledatabase/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/orchestration/airflow/service/apiv1": {
"api_shortname": "composer",
"distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1",
@@ -1779,6 +2029,36 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/parallelstore/apiv1": {
+ "api_shortname": "parallelstore",
+ "distribution_name": "cloud.google.com/go/parallelstore/apiv1",
+ "description": "Parallelstore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/parallelstore/apiv1beta": {
+ "api_shortname": "parallelstore",
+ "distribution_name": "cloud.google.com/go/parallelstore/apiv1beta",
+ "description": "Parallelstore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/parametermanager/apiv1": {
+ "api_shortname": "parametermanager",
+ "distribution_name": "cloud.google.com/go/parametermanager/apiv1",
+ "description": "Parameter Manager API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parametermanager/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/phishingprotection/apiv1beta1": {
"api_shortname": "phishingprotection",
"distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1",
@@ -1829,6 +2109,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/privilegedaccessmanager/apiv1": {
+ "api_shortname": "privilegedaccessmanager",
+ "distribution_name": "cloud.google.com/go/privilegedaccessmanager/apiv1",
+ "description": "Privileged Access Manager API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privilegedaccessmanager/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/profiler": {
"api_shortname": "cloudprofiler",
"distribution_name": "cloud.google.com/go/profiler",
@@ -1966,7 +2256,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/resourcemanager/apiv2": {
@@ -1989,20 +2279,10 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/resourcesettings/apiv1": {
- "api_shortname": "resourcesettings",
- "distribution_name": "cloud.google.com/go/resourcesettings/apiv1",
- "description": "Resource Settings API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1",
- "release_level": "stable",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/retail/apiv2": {
"api_shortname": "retail",
"distribution_name": "cloud.google.com/go/retail/apiv2",
- "description": "Retail API",
+ "description": "Vertex AI Search for Retail API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2",
@@ -2012,7 +2292,7 @@
"cloud.google.com/go/retail/apiv2alpha": {
"api_shortname": "retail",
"distribution_name": "cloud.google.com/go/retail/apiv2alpha",
- "description": "Retail API",
+ "description": "Vertex AI Search for Retail API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha",
@@ -2022,7 +2302,7 @@
"cloud.google.com/go/retail/apiv2beta": {
"api_shortname": "retail",
"distribution_name": "cloud.google.com/go/retail/apiv2beta",
- "description": "Retail API",
+ "description": "Vertex AI Search for Retail API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta",
@@ -2079,6 +2359,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/secretmanager/apiv1beta2": {
+ "api_shortname": "secretmanager",
+ "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta2",
+ "description": "Secret Manager API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1beta2",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/securesourcemanager/apiv1": {
"api_shortname": "securesourcemanager",
"distribution_name": "cloud.google.com/go/securesourcemanager/apiv1",
@@ -2086,7 +2376,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/security/privateca/apiv1": {
@@ -2099,6 +2389,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/security/publicca/apiv1": {
+ "api_shortname": "publicca",
+ "distribution_name": "cloud.google.com/go/security/publicca/apiv1",
+ "description": "Public Certificate Authority API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/security/publicca/apiv1beta1": {
"api_shortname": "publicca",
"distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1",
@@ -2139,6 +2439,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/securitycenter/apiv2": {
+ "api_shortname": "securitycenter",
+ "distribution_name": "cloud.google.com/go/securitycenter/apiv2",
+ "description": "Security Command Center API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv2",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/securitycenter/settings/apiv1beta1": {
"api_shortname": "securitycenter",
"distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1",
@@ -2152,10 +2462,20 @@
"cloud.google.com/go/securitycentermanagement/apiv1": {
"api_shortname": "securitycentermanagement",
"distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1",
- "description": "Security Center Management API",
+ "description": "Security Command Center Management API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/securityposture/apiv1": {
+ "api_shortname": "securityposture",
+ "distribution_name": "cloud.google.com/go/securityposture/apiv1",
+ "description": "Security Posture API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securityposture/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
@@ -2189,6 +2509,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/servicehealth/apiv1": {
+ "api_shortname": "servicehealth",
+ "distribution_name": "cloud.google.com/go/servicehealth/apiv1",
+ "description": "Service Health API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicehealth/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/servicemanagement/apiv1": {
"api_shortname": "servicemanagement",
"distribution_name": "cloud.google.com/go/servicemanagement/apiv1",
@@ -2229,6 +2559,36 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/conversions/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/shopping/merchant/inventories/apiv1beta": {
"api_shortname": "merchantapi",
"distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta",
@@ -2239,6 +2599,76 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/shopping/merchant/lfp/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/lfp/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/lfp/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/notifications/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/notifications/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/notifications/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/products/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/quota/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/quota/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/reports/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/reports/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reports/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/shopping/merchant/reviews/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/reviews/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reviews/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/spanner": {
"api_shortname": "spanner",
"distribution_name": "cloud.google.com/go/spanner",
@@ -2329,6 +2759,16 @@
"release_level": "stable",
"library_type": "GAPIC_MANUAL"
},
+ "cloud.google.com/go/storage/control/apiv2": {
+ "api_shortname": "storage",
+ "distribution_name": "cloud.google.com/go/storage/control/apiv2",
+ "description": "Storage Control API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/storage/internal/apiv2": {
"api_shortname": "storage",
"distribution_name": "cloud.google.com/go/storage/internal/apiv2",
@@ -2359,6 +2799,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/streetview/publish/apiv1": {
+ "api_shortname": "streetviewpublish",
+ "distribution_name": "cloud.google.com/go/streetview/publish/apiv1",
+ "description": "Street View Publish API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/streetview/latest/publish/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/support/apiv2": {
"api_shortname": "cloudsupport",
"distribution_name": "cloud.google.com/go/support/apiv2",
@@ -2396,7 +2846,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/texttospeech/apiv1": {
@@ -2529,6 +2979,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/visionai/apiv1": {
+ "api_shortname": "visionai",
+ "distribution_name": "cloud.google.com/go/visionai/apiv1",
+ "description": "Vision AI API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/visionai/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/vmmigration/apiv1": {
"api_shortname": "vmmigration",
"distribution_name": "cloud.google.com/go/vmmigration/apiv1",
@@ -2636,7 +3096,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/workstations/apiv1beta": {
diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh
new file mode 100644
index 000000000..59c190653
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/gen_info.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+# Script to generate info.go files with methods for all clients.
+
+if [[ $# != 2 ]]; then
+ echo >&2 "usage: $0 DIR PACKAGE"
+ exit 1
+fi
+
+outfile=info.go
+
+cd $1
+
+cat <<'EOF' > $outfile
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+
+EOF
+
+echo -e >> $outfile "package $2\n"
+
+
+awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ {
+ printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3);
+ printf(" c.setGoogleClientInfo(keyval...)\n");
+ printf("}\n\n");
+}' *_client.go >> $outfile
+
+gofmt -w $outfile
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index eabed000f..fcff2a7e4 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -18,115 +18,39 @@ import (
"context"
"errors"
"fmt"
- "os"
- "strings"
- "go.opencensus.io/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- ottrace "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace"
"google.golang.org/api/googleapi"
- "google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
const (
- // TelemetryPlatformTracingOpenCensus is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenCensus tracing.
- TelemetryPlatformTracingOpenCensus = "opencensus"
- // TelemetryPlatformTracingOpenCensus is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenTelemetry tracing.
- TelemetryPlatformTracingOpenTelemetry = "opentelemetry"
- // TelemetryPlatformTracingOpenCensus is the name of the environment
- // variable that can be set to change the default tracing from OpenCensus
- // to OpenTelemetry.
- TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
- // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer
- // when it is obtained from the OpenTelemetry TracerProvider.
OpenTelemetryTracerName = "cloud.google.com/go"
)
-var (
- // OpenTelemetryTracingEnabled is true if the environment variable
- // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
- // case-insensitive value "opentelemetry".
- //
- // Do not access directly. Use instead IsOpenTelemetryTracingEnabled or
- // IsOpenCensusTracingEnabled. Intended for use only in unit tests. Restore
- // original value after each test.
- OpenTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
- os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry)
-)
-
-// IsOpenCensusTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
-// case-insensitive value "opentelemetry".
-func IsOpenCensusTracingEnabled() bool {
- return !IsOpenTelemetryTracingEnabled()
-}
-
-// IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opentelemetry".
-func IsOpenTelemetryTracingEnabled() bool {
- return OpenTelemetryTracingEnabled
-}
-
-// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// StartSpan adds an OpenTelemetry span to the trace with the given name.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
func StartSpan(ctx context.Context, name string) context.Context {
- if IsOpenTelemetryTracingEnabled() {
- ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
- } else {
- ctx, _ = trace.StartSpan(ctx, name)
- }
+ ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
return ctx
}
-// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// EndSpan ends an OpenTelemetry span with the given error.
+//
+// The default experimental tracing support for OpenCensus is now deprecated in
+// the Google Cloud client libraries for Go.
func EndSpan(ctx context.Context, err error) {
- if IsOpenTelemetryTracingEnabled() {
- span := ottrace.SpanFromContext(ctx)
- if err != nil {
- span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
- span.RecordError(err)
- }
- span.End()
- } else {
- span := trace.FromContext(ctx)
- if err != nil {
- span.SetStatus(toStatus(err))
- }
- span.End()
- }
-}
-
-// toStatus converts an error to an equivalent OpenCensus status.
-func toStatus(err error) trace.Status {
- var err2 *googleapi.Error
- if ok := errors.As(err, &err2); ok {
- return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
- } else if s, ok := status.FromError(err); ok {
- return trace.Status{Code: int32(s.Code()), Message: s.Message()}
- } else {
- return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
+ span := trace.SpanFromContext(ctx)
+ if err != nil {
+ span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
+ span.RecordError(err)
}
+ span.End()
}
// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description.
@@ -141,84 +65,13 @@ func toOpenTelemetryStatusDescription(err error) string {
}
}
-// TODO(deklerk): switch to using OpenCensus function when it becomes available.
-// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
-func httpStatusCodeToOCCode(httpStatusCode int) int32 {
- switch httpStatusCode {
- case 200:
- return int32(code.Code_OK)
- case 499:
- return int32(code.Code_CANCELLED)
- case 500:
- return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
- case 400:
- return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
- case 504:
- return int32(code.Code_DEADLINE_EXCEEDED)
- case 404:
- return int32(code.Code_NOT_FOUND)
- case 409:
- return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
- case 403:
- return int32(code.Code_PERMISSION_DENIED)
- case 401:
- return int32(code.Code_UNAUTHENTICATED)
- case 429:
- return int32(code.Code_RESOURCE_EXHAUSTED)
- case 501:
- return int32(code.Code_UNIMPLEMENTED)
- case 503:
- return int32(code.Code_UNAVAILABLE)
- default:
- return int32(code.Code_UNKNOWN)
- }
-}
-
-// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then:
-// * calls Span.Annotatef if OpenCensus is enabled; or
-// * calls Span.AddEvent if OpenTelemetry is enabled.
-//
-// If IsOpenCensusTracingEnabled returns true, the expected span must be an
-// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
-// span must be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until May 29, 2024, at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// TracePrintf retrieves the current OpenTelemetry span from context, then calls
+// Span.AddEvent. The expected span must be an OpenTelemetry span. The default
+// experimental tracing support for OpenCensus is now deprecated in the Google
+// Cloud client libraries for Go.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
- if IsOpenTelemetryTracingEnabled() {
- attrs := otAttrs(attrMap)
- ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...))
- } else {
- attrs := ocAttrs(attrMap)
- // TODO: (odeke-em): perhaps just pass around spans due to the cost
- // incurred from using trace.FromContext(ctx) yet we could avoid
- // throwing away the work done by ctx, span := trace.StartSpan.
- trace.FromContext(ctx).Annotatef(attrs, format, args...)
- }
-}
-
-// ocAttrs converts a generic map to OpenCensus attributes.
-func ocAttrs(attrMap map[string]interface{}) []trace.Attribute {
- var attrs []trace.Attribute
- for k, v := range attrMap {
- var a trace.Attribute
- switch v := v.(type) {
- case string:
- a = trace.StringAttribute(k, v)
- case bool:
- a = trace.BoolAttribute(k, v)
- case int:
- a = trace.Int64Attribute(k, int64(v))
- case int64:
- a = trace.Int64Attribute(k, v)
- default:
- a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
- }
- attrs = append(attrs, a)
- }
- return attrs
+ attrs := otAttrs(attrMap)
+ trace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), trace.WithAttributes(attrs...))
}
// otAttrs converts a generic map to OpenTelemetry attributes.
diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
new file mode 100644
index 000000000..22b9eaa14
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
@@ -0,0 +1,1185 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package kms
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/http"
+ "net/url"
+ "time"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ httptransport "google.golang.org/api/transport/http"
+ locationpb "google.golang.org/genproto/googleapis/cloud/location"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+var newAutokeyAdminClientHook clientHook
+
+// AutokeyAdminCallOptions contains the retry settings for each method of AutokeyAdminClient.
+type AutokeyAdminCallOptions struct {
+ UpdateAutokeyConfig []gax.CallOption
+ GetAutokeyConfig []gax.CallOption
+ ShowEffectiveAutokeyConfig []gax.CallOption
+ GetLocation []gax.CallOption
+ ListLocations []gax.CallOption
+ GetIamPolicy []gax.CallOption
+ SetIamPolicy []gax.CallOption
+ TestIamPermissions []gax.CallOption
+ GetOperation []gax.CallOption
+}
+
+func defaultAutokeyAdminGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultAutokeyAdminCallOptions() *AutokeyAdminCallOptions {
+ return &AutokeyAdminCallOptions{
+ UpdateAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ShowEffectiveAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetLocation: []gax.CallOption{},
+ ListLocations: []gax.CallOption{},
+ GetIamPolicy: []gax.CallOption{},
+ SetIamPolicy: []gax.CallOption{},
+ TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ }
+}
+
+func defaultAutokeyAdminRESTCallOptions() *AutokeyAdminCallOptions {
+ return &AutokeyAdminCallOptions{
+ UpdateAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ShowEffectiveAutokeyConfig: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetLocation: []gax.CallOption{},
+ ListLocations: []gax.CallOption{},
+ GetIamPolicy: []gax.CallOption{},
+ SetIamPolicy: []gax.CallOption{},
+ TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ }
+}
+
+// internalAutokeyAdminClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API.
+type internalAutokeyAdminClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ UpdateAutokeyConfig(context.Context, *kmspb.UpdateAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error)
+ GetAutokeyConfig(context.Context, *kmspb.GetAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error)
+ ShowEffectiveAutokeyConfig(context.Context, *kmspb.ShowEffectiveAutokeyConfigRequest, ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error)
+ GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error)
+ ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
+}
+
+// AutokeyAdminClient is a client for interacting with Cloud Key Management Service (KMS) API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// Provides interfaces for managing Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level
+// configurations. A configuration is inherited by all descendent projects. A
+// configuration at one folder overrides any other configurations in its
+// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
+// Autokey, so that users working in a descendant project can request
+// provisioned CryptoKeys, ready for Customer
+// Managed Encryption Key (CMEK) use, on-demand.
+type AutokeyAdminClient struct {
+ // The internal transport-dependent client.
+ internalClient internalAutokeyAdminClient
+
+ // The call options for this service.
+ CallOptions *AutokeyAdminCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AutokeyAdminClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AutokeyAdminClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *AutokeyAdminClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// UpdateAutokeyConfig updates the AutokeyConfig for a
+// folder. The caller must have both cloudkms.autokeyConfigs.update
+// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy
+// permission on the provided key project. A
+// KeyHandle creation in the folder’s
+// descendant projects will use this configuration to determine where to
+// create the resulting CryptoKey.
+func (c *AutokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ return c.internalClient.UpdateAutokeyConfig(ctx, req, opts...)
+}
+
+// GetAutokeyConfig returns the AutokeyConfig for a
+// folder.
+func (c *AutokeyAdminClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ return c.internalClient.GetAutokeyConfig(ctx, req, opts...)
+}
+
+// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project.
+func (c *AutokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
+ return c.internalClient.ShowEffectiveAutokeyConfig(ctx, req, opts...)
+}
+
+// GetLocation gets information about a location.
+func (c *AutokeyAdminClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ return c.internalClient.GetLocation(ctx, req, opts...)
+}
+
+// ListLocations lists information about the supported locations for this service.
+func (c *AutokeyAdminClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ return c.internalClient.ListLocations(ctx, req, opts...)
+}
+
+// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
+// if the resource exists and does not have a policy set.
+func (c *AutokeyAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.GetIamPolicy(ctx, req, opts...)
+}
+
+// SetIamPolicy sets the access control policy on the specified resource. Replaces
+// any existing policy.
+//
+// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
+// errors.
+func (c *AutokeyAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.SetIamPolicy(ctx, req, opts...)
+}
+
+// TestIamPermissions returns permissions that a caller has on the specified resource. If the
+// resource does not exist, this will return an empty set of
+// permissions, not a NOT_FOUND error.
+//
+// Note: This operation is designed to be used for building
+// permission-aware UIs and command-line tools, not for authorization
+// checking. This operation may “fail open” without warning.
+func (c *AutokeyAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ return c.internalClient.TestIamPermissions(ctx, req, opts...)
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *AutokeyAdminClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
+// autokeyAdminGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type autokeyAdminGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing AutokeyAdminClient
+ CallOptions **AutokeyAdminCallOptions
+
+ // The gRPC API client.
+ autokeyAdminClient kmspb.AutokeyAdminClient
+
+ operationsClient longrunningpb.OperationsClient
+
+ iamPolicyClient iampb.IAMPolicyClient
+
+ locationsClient locationpb.LocationsClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewAutokeyAdminClient creates a new autokey admin client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// Provides interfaces for managing Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level
+// configurations. A configuration is inherited by all descendent projects. A
+// configuration at one folder overrides any other configurations in its
+// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
+// Autokey, so that users working in a descendant project can request
+// provisioned CryptoKeys, ready for Customer
+// Managed Encryption Key (CMEK) use, on-demand.
+func NewAutokeyAdminClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) {
+ clientOpts := defaultAutokeyAdminGRPCClientOptions()
+ if newAutokeyAdminClientHook != nil {
+ hookOpts, err := newAutokeyAdminClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := AutokeyAdminClient{CallOptions: defaultAutokeyAdminCallOptions()}
+
+ c := &autokeyAdminGRPCClient{
+ connPool: connPool,
+ autokeyAdminClient: kmspb.NewAutokeyAdminClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
+ iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
+ locationsClient: locationpb.NewLocationsClient(connPool),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *autokeyAdminGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *autokeyAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *autokeyAdminGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type autokeyAdminRESTClient struct {
+ // The http endpoint to connect to.
+ endpoint string
+
+ // The http client.
+ httpClient *http.Client
+
+ // The x-goog-* headers to be sent with each request.
+ xGoogHeaders []string
+
+ // Points back to the CallOptions field of the containing AutokeyAdminClient
+ CallOptions **AutokeyAdminCallOptions
+
+ logger *slog.Logger
+}
+
+// NewAutokeyAdminRESTClient creates a new autokey admin rest client.
+//
+// Provides interfaces for managing Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) folder-level
+// configurations. A configuration is inherited by all descendent projects. A
+// configuration at one folder overrides any other configurations in its
+// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
+// Autokey, so that users working in a descendant project can request
+// provisioned CryptoKeys, ready for Customer
+// Managed Encryption Key (CMEK) use, on-demand.
+func NewAutokeyAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) {
+ clientOpts := append(defaultAutokeyAdminRESTClientOptions(), opts...)
+ httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ callOpts := defaultAutokeyAdminRESTCallOptions()
+ c := &autokeyAdminRESTClient{
+ endpoint: endpoint,
+ httpClient: httpClient,
+ CallOptions: &callOpts,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ return &AutokeyAdminClient{internalClient: c, CallOptions: callOpts}, nil
+}
+
+func defaultAutokeyAdminRESTClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
+ internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
+ }
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *autokeyAdminRESTClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *autokeyAdminRESTClient) Close() error {
+ // Replace httpClient with nil to force cleanup.
+ c.httpClient = nil
+ return nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: This method always returns nil.
+func (c *autokeyAdminRESTClient) Connection() *grpc.ClientConn {
+ return nil
+}
+func (c *autokeyAdminGRPCClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...)
+ var resp *kmspb.AutokeyConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyAdminClient.UpdateAutokeyConfig, req, settings.GRPC, c.logger, "UpdateAutokeyConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...)
+ var resp *kmspb.AutokeyConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyAdminClient.GetAutokeyConfig, req, settings.GRPC, c.logger, "GetAutokeyConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...)
+ var resp *kmspb.ShowEffectiveAutokeyConfigResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyAdminClient.ShowEffectiveAutokeyConfig, req, settings.GRPC, c.logger, "ShowEffectiveAutokeyConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
+ var resp *locationpb.Location
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...)
+ it := &LocationIterator{}
+ req = proto.Clone(req).(*locationpb.ListLocationsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
+ resp := &locationpb.ListLocationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetLocations(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *autokeyAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ var resp *iampb.TestIamPermissionsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyAdminGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateAutokeyConfig updates the AutokeyConfig for a
+// folder. The caller must have both cloudkms.autokeyConfigs.update
+// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy
+// permission on the provided key project. A
+// KeyHandle creation in the folder’s
+// descendant projects will use this configuration to determine where to
+// create the resulting CryptoKey.
+func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ body := req.GetAutokeyConfig()
+ jsonReq, err := m.Marshal(body)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetAutokeyConfig().GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetUpdateMask() != nil {
+ field, err := protojson.Marshal(req.GetUpdateMask())
+ if err != nil {
+ return nil, err
+ }
+ params.Add("updateMask", string(field[1:len(field)-1]))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &kmspb.AutokeyConfig{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateAutokeyConfig")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// GetAutokeyConfig returns the AutokeyConfig for a
+// folder.
+func (c *autokeyAdminRESTClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &kmspb.AutokeyConfig{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetAutokeyConfig")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project.
+func (c *autokeyAdminRESTClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:showEffectiveAutokeyConfig", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &kmspb.ShowEffectiveAutokeyConfigResponse{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ShowEffectiveAutokeyConfig")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// GetLocation gets information about a location.
+func (c *autokeyAdminRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &locationpb.Location{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// ListLocations lists information about the supported locations for this service.
+func (c *autokeyAdminRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ it := &LocationIterator{}
+ req = proto.Clone(req).(*locationpb.ListLocationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
+ resp := &locationpb.ListLocationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
+ if err != nil {
+ return err
+ }
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetLocations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
+// if the resource exists and does not have a policy set.
+func (c *autokeyAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetOptions().GetRequestedPolicyVersion() != 0 {
+ params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// SetIamPolicy sets the access control policy on the specified resource. Replaces
+// any existing policy.
+//
+// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
+// errors.
+func (c *autokeyAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// TestIamPermissions returns permissions that a caller has on the specified resource. If the
+// resource does not exist, this will return an empty set of
+// permissions, not a NOT_FOUND error.
+//
+// Note: This operation is designed to be used for building
+// permission-aware UIs and command-line tools, not for authorization
+// checking. This operation may “fail open” without warning.
+func (c *autokeyAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.TestIamPermissionsResponse{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *autokeyAdminRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
new file mode 100644
index 000000000..7986a78e1
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
@@ -0,0 +1,1317 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package kms
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/http"
+ "net/url"
+ "time"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ "cloud.google.com/go/longrunning"
+ lroauto "cloud.google.com/go/longrunning/autogen"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ httptransport "google.golang.org/api/transport/http"
+ locationpb "google.golang.org/genproto/googleapis/cloud/location"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+var newAutokeyClientHook clientHook
+
+// AutokeyCallOptions contains the retry settings for each method of AutokeyClient.
+type AutokeyCallOptions struct {
+ CreateKeyHandle []gax.CallOption
+ GetKeyHandle []gax.CallOption
+ ListKeyHandles []gax.CallOption
+ GetLocation []gax.CallOption
+ ListLocations []gax.CallOption
+ GetIamPolicy []gax.CallOption
+ SetIamPolicy []gax.CallOption
+ TestIamPermissions []gax.CallOption
+ GetOperation []gax.CallOption
+}
+
+func defaultAutokeyGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultAutokeyCallOptions() *AutokeyCallOptions {
+ return &AutokeyCallOptions{
+ CreateKeyHandle: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ },
+ GetKeyHandle: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListKeyHandles: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetLocation: []gax.CallOption{},
+ ListLocations: []gax.CallOption{},
+ GetIamPolicy: []gax.CallOption{},
+ SetIamPolicy: []gax.CallOption{},
+ TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ }
+}
+
+func defaultAutokeyRESTCallOptions() *AutokeyCallOptions {
+ return &AutokeyCallOptions{
+ CreateKeyHandle: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ },
+ GetKeyHandle: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListKeyHandles: []gax.CallOption{
+ gax.WithTimeout(60000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetLocation: []gax.CallOption{},
+ ListLocations: []gax.CallOption{},
+ GetIamPolicy: []gax.CallOption{},
+ SetIamPolicy: []gax.CallOption{},
+ TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ }
+}
+
+// internalAutokeyClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API.
+type internalAutokeyClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ CreateKeyHandle(context.Context, *kmspb.CreateKeyHandleRequest, ...gax.CallOption) (*CreateKeyHandleOperation, error)
+ CreateKeyHandleOperation(name string) *CreateKeyHandleOperation
+ GetKeyHandle(context.Context, *kmspb.GetKeyHandleRequest, ...gax.CallOption) (*kmspb.KeyHandle, error)
+ ListKeyHandles(context.Context, *kmspb.ListKeyHandlesRequest, ...gax.CallOption) *KeyHandleIterator
+ GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error)
+ ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
+}
+
+// AutokeyClient is a client for interacting with Cloud Key Management Service (KMS) API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// Provides interfaces for using Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new
+// CryptoKeys, ready for Customer Managed
+// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
+// feature is modeled around a KeyHandle
+// resource: creating a KeyHandle in a resource
+// project and given location triggers Cloud KMS Autokey to provision a
+// CryptoKey in the configured key project and
+// the same location.
+//
+// Prior to use in a given resource project,
+// UpdateAutokeyConfig
+// should have been called on an ancestor folder, setting the key project where
+// Cloud KMS Autokey should create new
+// CryptoKeys. See documentation for additional
+// prerequisites. To check what key project, if any, is currently configured on
+// a resource project’s ancestor folder, see
+// ShowEffectiveAutokeyConfig.
+type AutokeyClient struct {
+ // The internal transport-dependent client.
+ internalClient internalAutokeyClient
+
+ // The call options for this service.
+ CallOptions *AutokeyCallOptions
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient *lroauto.OperationsClient
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AutokeyClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AutokeyClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *AutokeyClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// CreateKeyHandle creates a new KeyHandle, triggering the
+// provisioning of a new CryptoKey for CMEK
+// use with the given resource type in the configured key project and the same
+// location. GetOperation should
+// be used to resolve the resulting long-running operation and get the
+// resulting KeyHandle and
+// CryptoKey.
+func (c *AutokeyClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
+ return c.internalClient.CreateKeyHandle(ctx, req, opts...)
+}
+
+// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
+// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
+func (c *AutokeyClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
+ return c.internalClient.CreateKeyHandleOperation(name)
+}
+
+// GetKeyHandle returns the KeyHandle.
+func (c *AutokeyClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
+ return c.internalClient.GetKeyHandle(ctx, req, opts...)
+}
+
+// ListKeyHandles lists KeyHandles.
+func (c *AutokeyClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator {
+ return c.internalClient.ListKeyHandles(ctx, req, opts...)
+}
+
+// GetLocation gets information about a location.
+func (c *AutokeyClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ return c.internalClient.GetLocation(ctx, req, opts...)
+}
+
+// ListLocations lists information about the supported locations for this service.
+func (c *AutokeyClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ return c.internalClient.ListLocations(ctx, req, opts...)
+}
+
+// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
+// if the resource exists and does not have a policy set.
+func (c *AutokeyClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.GetIamPolicy(ctx, req, opts...)
+}
+
+// SetIamPolicy sets the access control policy on the specified resource. Replaces
+// any existing policy.
+//
+// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
+// errors.
+func (c *AutokeyClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.SetIamPolicy(ctx, req, opts...)
+}
+
+// TestIamPermissions returns permissions that a caller has on the specified resource. If the
+// resource does not exist, this will return an empty set of
+// permissions, not a NOT_FOUND error.
+//
+// Note: This operation is designed to be used for building
+// permission-aware UIs and command-line tools, not for authorization
+// checking. This operation may “fail open” without warning.
+func (c *AutokeyClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ return c.internalClient.TestIamPermissions(ctx, req, opts...)
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *AutokeyClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
+// autokeyGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type autokeyGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing AutokeyClient
+ CallOptions **AutokeyCallOptions
+
+ // The gRPC API client.
+ autokeyClient kmspb.AutokeyClient
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient **lroauto.OperationsClient
+
+ operationsClient longrunningpb.OperationsClient
+
+ iamPolicyClient iampb.IAMPolicyClient
+
+ locationsClient locationpb.LocationsClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewAutokeyClient creates a new autokey client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// Provides interfaces for using Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new
+// CryptoKeys, ready for Customer Managed
+// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
+// feature is modeled around a KeyHandle
+// resource: creating a KeyHandle in a resource
+// project and given location triggers Cloud KMS Autokey to provision a
+// CryptoKey in the configured key project and
+// the same location.
+//
+// Prior to use in a given resource project,
+// UpdateAutokeyConfig
+// should have been called on an ancestor folder, setting the key project where
+// Cloud KMS Autokey should create new
+// CryptoKeys. See documentation for additional
+// prerequisites. To check what key project, if any, is currently configured on
+// a resource project’s ancestor folder, see
+// ShowEffectiveAutokeyConfig.
+func NewAutokeyClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) {
+ clientOpts := defaultAutokeyGRPCClientOptions()
+ if newAutokeyClientHook != nil {
+ hookOpts, err := newAutokeyClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := AutokeyClient{CallOptions: defaultAutokeyCallOptions()}
+
+ c := &autokeyGRPCClient{
+ connPool: connPool,
+ autokeyClient: kmspb.NewAutokeyClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
+ iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
+ locationsClient: locationpb.NewLocationsClient(connPool),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
+ if err != nil {
+ // This error "should not happen", since we are just reusing old connection pool
+ // and never actually need to dial.
+ // If this does happen, we could leak connp. However, we cannot close conn:
+ // If the user invoked the constructor with option.WithGRPCConn,
+ // we would close a connection that's still in use.
+ // TODO: investigate error conditions.
+ return nil, err
+ }
+ c.LROClient = &client.LROClient
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *autokeyGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *autokeyGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *autokeyGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type autokeyRESTClient struct {
+ // The http endpoint to connect to.
+ endpoint string
+
+ // The http client.
+ httpClient *http.Client
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient **lroauto.OperationsClient
+
+ // The x-goog-* headers to be sent with each request.
+ xGoogHeaders []string
+
+ // Points back to the CallOptions field of the containing AutokeyClient
+ CallOptions **AutokeyCallOptions
+
+ logger *slog.Logger
+}
+
+// NewAutokeyRESTClient creates a new autokey rest client.
+//
+// Provides interfaces for using Cloud KMS
+// Autokey (at https://cloud.google.com/kms/help/autokey) to provision new
+// CryptoKeys, ready for Customer Managed
+// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
+// feature is modeled around a KeyHandle
+// resource: creating a KeyHandle in a resource
+// project and given location triggers Cloud KMS Autokey to provision a
+// CryptoKey in the configured key project and
+// the same location.
+//
+// Prior to use in a given resource project,
+// UpdateAutokeyConfig
+// should have been called on an ancestor folder, setting the key project where
+// Cloud KMS Autokey should create new
+// CryptoKeys. See documentation for additional
+// prerequisites. To check what key project, if any, is currently configured on
+// a resource project’s ancestor folder, see
+// ShowEffectiveAutokeyConfig.
+func NewAutokeyRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) {
+ clientOpts := append(defaultAutokeyRESTClientOptions(), opts...)
+ httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ callOpts := defaultAutokeyRESTCallOptions()
+ c := &autokeyRESTClient{
+ endpoint: endpoint,
+ httpClient: httpClient,
+ CallOptions: &callOpts,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ lroOpts := []option.ClientOption{
+ option.WithHTTPClient(httpClient),
+ option.WithEndpoint(endpoint),
+ }
+ opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
+ if err != nil {
+ return nil, err
+ }
+ c.LROClient = &opClient
+
+ return &AutokeyClient{internalClient: c, CallOptions: callOpts}, nil
+}
+
+func defaultAutokeyRESTClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
+ internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
+ }
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *autokeyRESTClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *autokeyRESTClient) Close() error {
+ // Replace httpClient with nil to force cleanup.
+ c.httpClient = nil
+ return nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: This method always returns nil.
+func (c *autokeyRESTClient) Connection() *grpc.ClientConn {
+ return nil
+}
+func (c *autokeyGRPCClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateKeyHandle[0:len((*c.CallOptions).CreateKeyHandle):len((*c.CallOptions).CreateKeyHandle)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyClient.CreateKeyHandle, req, settings.GRPC, c.logger, "CreateKeyHandle")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &CreateKeyHandleOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...)
+ var resp *kmspb.KeyHandle
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyClient.GetKeyHandle, req, settings.GRPC, c.logger, "GetKeyHandle")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...)
+ it := &KeyHandleIterator{}
+ req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) {
+ resp := &kmspb.ListKeyHandlesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.autokeyClient.ListKeyHandles, req, settings.GRPC, c.logger, "ListKeyHandles")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetKeyHandles(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
+ var resp *locationpb.Location
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...)
+ it := &LocationIterator{}
+ req = proto.Clone(req).(*locationpb.ListLocationsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
+ resp := &locationpb.ListLocationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetLocations(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *autokeyGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ var resp *iampb.TestIamPermissionsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateKeyHandle creates a new KeyHandle, triggering the
+// provisioning of a new CryptoKey for CMEK
+// use with the given resource type in the configured key project and the same
+// location. GetOperation should
+// be used to resolve the resulting long-running operation and get the
+// resulting KeyHandle and
+// CryptoKey.
+func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ body := req.GetKeyHandle()
+ jsonReq, err := m.Marshal(body)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetKeyHandleId() != "" {
+ params.Add("keyHandleId", fmt.Sprintf("%v", req.GetKeyHandleId()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyHandle")
+ if err != nil {
+ return err
+ }
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &CreateKeyHandleOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ pollPath: override,
+ }, nil
+}
+
+// GetKeyHandle returns the KeyHandle.
+func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &kmspb.KeyHandle{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyHandle")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// ListKeyHandles lists KeyHandles.
+func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) *KeyHandleIterator {
+ it := &KeyHandleIterator{}
+ req = proto.Clone(req).(*kmspb.ListKeyHandlesRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*kmspb.KeyHandle, string, error) {
+ resp := &kmspb.ListKeyHandlesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyHandles")
+ if err != nil {
+ return err
+ }
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetKeyHandles(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetLocation gets information about a location.
+func (c *autokeyRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &locationpb.Location{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// ListLocations lists information about the supported locations for this service.
+func (c *autokeyRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
+ it := &LocationIterator{}
+ req = proto.Clone(req).(*locationpb.ListLocationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
+ resp := &locationpb.ListLocationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
+ if err != nil {
+ return err
+ }
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetLocations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
+// if the resource exists and does not have a policy set.
+func (c *autokeyRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetOptions().GetRequestedPolicyVersion() != 0 {
+ params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// SetIamPolicy sets the access control policy on the specified resource. Replaces
+// any existing policy.
+//
+// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
+// errors.
+func (c *autokeyRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// TestIamPermissions returns permissions that a caller has on the specified resource. If the
+// resource does not exist, this will return an empty set of
+// permissions, not a NOT_FOUND error.
+//
+// Note: This operation is designed to be used for building
+// permission-aware UIs and command-line tools, not for authorization
+// checking. This operation may “fail open” without warning.
+func (c *autokeyRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.TestIamPermissionsResponse{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *autokeyRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
+// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
+func (c *autokeyGRPCClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
+ return &CreateKeyHandleOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
+// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
+func (c *autokeyRESTClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &CreateKeyHandleOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
new file mode 100644
index 000000000..36e6bf84c
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
@@ -0,0 +1,421 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ "time"
+
+ kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ "cloud.google.com/go/longrunning"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ locationpb "google.golang.org/genproto/googleapis/cloud/location"
+)
+
+// CreateKeyHandleOperation manages a long-running operation from CreateKeyHandle.
+type CreateKeyHandleOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateKeyHandleOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp kmspb.KeyHandle
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateKeyHandleOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp kmspb.KeyHandle
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateKeyHandleOperation) Metadata() (*kmspb.CreateKeyHandleMetadata, error) {
+ var meta kmspb.CreateKeyHandleMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateKeyHandleOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateKeyHandleOperation) Name() string {
+ return op.lro.Name()
+}
+
+// CryptoKeyIterator manages a stream of *kmspb.CryptoKey.
+type CryptoKeyIterator struct {
+ items []*kmspb.CryptoKey
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) {
+ var item *kmspb.CryptoKey
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *CryptoKeyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *CryptoKeyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion.
+type CryptoKeyVersionIterator struct {
+ items []*kmspb.CryptoKeyVersion
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) {
+ var item *kmspb.CryptoKeyVersion
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *CryptoKeyVersionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *CryptoKeyVersionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// EkmConnectionIterator manages a stream of *kmspb.EkmConnection.
+type EkmConnectionIterator struct {
+ items []*kmspb.EkmConnection
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) {
+ var item *kmspb.EkmConnection
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *EkmConnectionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *EkmConnectionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ImportJobIterator manages a stream of *kmspb.ImportJob.
+type ImportJobIterator struct {
+ items []*kmspb.ImportJob
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *ImportJobIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) {
+ var item *kmspb.ImportJob
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ImportJobIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ImportJobIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// KeyHandleIterator manages a stream of *kmspb.KeyHandle.
+type KeyHandleIterator struct {
+ items []*kmspb.KeyHandle
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyHandle, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *KeyHandleIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *KeyHandleIterator) Next() (*kmspb.KeyHandle, error) {
+ var item *kmspb.KeyHandle
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *KeyHandleIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *KeyHandleIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// KeyRingIterator manages a stream of *kmspb.KeyRing.
+type KeyRingIterator struct {
+ items []*kmspb.KeyRing
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *KeyRingIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) {
+ var item *kmspb.KeyRing
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *KeyRingIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *KeyRingIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// LocationIterator manages a stream of *locationpb.Location.
+type LocationIterator struct {
+ items []*locationpb.Location
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *LocationIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LocationIterator) Next() (*locationpb.Location, error) {
+ var item *locationpb.Location
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *LocationIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *LocationIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go
new file mode 100644
index 000000000..b8517f381
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/auxiliary_go123.go
@@ -0,0 +1,69 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package kms
+
+import (
+ "iter"
+
+ kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ "github.com/googleapis/gax-go/v2/iterator"
+ locationpb "google.golang.org/genproto/googleapis/cloud/location"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *CryptoKeyIterator) All() iter.Seq2[*kmspb.CryptoKey, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *CryptoKeyVersionIterator) All() iter.Seq2[*kmspb.CryptoKeyVersion, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *EkmConnectionIterator) All() iter.Seq2[*kmspb.EkmConnection, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ImportJobIterator) All() iter.Seq2[*kmspb.ImportJob, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *KeyHandleIterator) All() iter.Seq2[*kmspb.KeyHandle, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *KeyRingIterator) All() iter.Seq2[*kmspb.KeyRing, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *LocationIterator) All() iter.Seq2[*locationpb.Location, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/doc.go b/vendor/cloud.google.com/go/kms/apiv1/doc.go
index f5fad9615..7f85be3e6 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/doc.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -36,13 +36,14 @@
//
// To get started with this package, create a client.
//
+// // go get cloud.google.com/go/kms/apiv1@latest
// ctx := context.Background()
// // This snippet has been automatically generated and should be regarded as a code template only.
// // It will require modifications to work:
// // - It may require correct/in-range values for request initialization.
// // - It may require specifying regional endpoints when creating the service client as shown in:
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := kms.NewEkmClient(ctx)
+// c, err := kms.NewAutokeyClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
@@ -54,40 +55,27 @@
//
// # Using the Client
//
-// The following is an example of making an API call with the newly created client.
+// The following is an example of making an API call with the newly created client, mentioned above.
//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := kms.NewEkmClient(ctx)
+// req := &kmspb.CreateKeyHandleRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#CreateKeyHandleRequest.
+// }
+// op, err := c.CreateKeyHandle(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
-// defer c.Close()
//
-// req := &kmspb.ListEkmConnectionsRequest{
-// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#ListEkmConnectionsRequest.
-// }
-// it := c.ListEkmConnections(ctx, req)
-// for {
-// resp, err := it.Next()
-// if err == iterator.Done {
-// break
-// }
-// if err != nil {
-// // TODO: Handle error.
-// }
-// // TODO: Use resp.
-// _ = resp
+// resp, err := op.Wait(ctx)
+// if err != nil {
+// // TODO: Handle error.
// }
+// // TODO: Use resp.
+// _ = resp
//
// # Use of Context
//
-// The ctx passed to NewEkmClient is used for authentication requests and
+// The ctx passed to NewAutokeyClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
@@ -99,31 +87,3 @@
// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
package kms // import "cloud.google.com/go/kms/apiv1"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloudkms",
- }
-}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
index 2cf9f9652..f7ef5ac46 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import (
"bytes"
"context"
"fmt"
- "io"
+ "log/slog"
"math"
"net/http"
"net/url"
@@ -28,8 +28,8 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -58,15 +58,19 @@ type EkmCallOptions struct {
GetIamPolicy []gax.CallOption
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
+ GetOperation []gax.CallOption
}
func defaultEkmGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -134,6 +138,7 @@ func defaultEkmCallOptions() *EkmCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
}
}
@@ -195,6 +200,7 @@ func defaultEkmRESTCallOptions() *EkmCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
}
}
@@ -215,6 +221,7 @@ type internalEkmClient interface {
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
}
// EkmClient is a client for interacting with Cloud Key Management Service (KMS) API.
@@ -336,6 +343,11 @@ func (c *EkmClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPe
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *EkmClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
// ekmGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -349,12 +361,16 @@ type ekmGRPCClient struct {
// The gRPC API client.
ekmClient kmspb.EkmServiceClient
+ operationsClient longrunningpb.OperationsClient
+
iamPolicyClient iampb.IAMPolicyClient
locationsClient locationpb.LocationsClient
// The x-goog-* metadata to be sent with each request.
xGoogHeaders []string
+
+ logger *slog.Logger
}
// NewEkmClient creates a new ekm service client based on gRPC.
@@ -383,11 +399,13 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient,
client := EkmClient{CallOptions: defaultEkmCallOptions()}
c := &ekmGRPCClient{
- connPool: connPool,
- ekmClient: kmspb.NewEkmServiceClient(connPool),
- CallOptions: &client.CallOptions,
- iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
- locationsClient: locationpb.NewLocationsClient(connPool),
+ connPool: connPool,
+ ekmClient: kmspb.NewEkmServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
+ iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
+ locationsClient: locationpb.NewLocationsClient(connPool),
}
c.setGoogleClientInfo()
@@ -410,7 +428,9 @@ func (c *ekmGRPCClient) Connection() *grpc.ClientConn {
func (c *ekmGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -432,6 +452,8 @@ type ekmRESTClient struct {
// Points back to the CallOptions field of the containing EkmClient
CallOptions **EkmCallOptions
+
+ logger *slog.Logger
}
// NewEkmRESTClient creates a new ekm service rest client.
@@ -454,6 +476,7 @@ func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmCli
endpoint: endpoint,
httpClient: httpClient,
CallOptions: &callOpts,
+ logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
@@ -463,9 +486,12 @@ func NewEkmRESTClient(ctx context.Context, opts ...option.ClientOption) (*EkmCli
func defaultEkmRESTClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
}
}
@@ -475,7 +501,9 @@ func defaultEkmRESTClientOptions() []option.ClientOption {
func (c *ekmRESTClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -512,7 +540,7 @@ func (c *ekmGRPCClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.ListEkmConnections(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.ListEkmConnections, req, settings.GRPC, c.logger, "ListEkmConnections")
return err
}, opts...)
if err != nil {
@@ -547,7 +575,7 @@ func (c *ekmGRPCClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC
var resp *kmspb.EkmConnection
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.GetEkmConnection(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.GetEkmConnection, req, settings.GRPC, c.logger, "GetEkmConnection")
return err
}, opts...)
if err != nil {
@@ -565,7 +593,7 @@ func (c *ekmGRPCClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea
var resp *kmspb.EkmConnection
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.CreateEkmConnection(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.CreateEkmConnection, req, settings.GRPC, c.logger, "CreateEkmConnection")
return err
}, opts...)
if err != nil {
@@ -583,7 +611,7 @@ func (c *ekmGRPCClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda
var resp *kmspb.EkmConnection
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.UpdateEkmConnection(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConnection, req, settings.GRPC, c.logger, "UpdateEkmConnection")
return err
}, opts...)
if err != nil {
@@ -601,7 +629,7 @@ func (c *ekmGRPCClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi
var resp *kmspb.EkmConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.GetEkmConfig(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.GetEkmConfig, req, settings.GRPC, c.logger, "GetEkmConfig")
return err
}, opts...)
if err != nil {
@@ -619,7 +647,7 @@ func (c *ekmGRPCClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk
var resp *kmspb.EkmConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.UpdateEkmConfig(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.UpdateEkmConfig, req, settings.GRPC, c.logger, "UpdateEkmConfig")
return err
}, opts...)
if err != nil {
@@ -637,7 +665,7 @@ func (c *ekmGRPCClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif
var resp *kmspb.VerifyConnectivityResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.ekmClient.VerifyConnectivity(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.ekmClient.VerifyConnectivity, req, settings.GRPC, c.logger, "VerifyConnectivity")
return err
}, opts...)
if err != nil {
@@ -655,7 +683,7 @@ func (c *ekmGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLoca
var resp *locationpb.Location
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
return err
}, opts...)
if err != nil {
@@ -684,7 +712,7 @@ func (c *ekmGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListL
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
return err
}, opts...)
if err != nil {
@@ -719,7 +747,7 @@ func (c *ekmGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -737,7 +765,7 @@ func (c *ekmGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -755,7 +783,25 @@ func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
var resp *iampb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *ekmGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
return err
}, opts...)
if err != nil {
@@ -815,21 +861,10 @@ func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListE
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListEkmConnections")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -893,17 +928,7 @@ func (c *ekmRESTClient) GetEkmConnection(ctx context.Context, req *kmspb.GetEkmC
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConnection")
if err != nil {
return err
}
@@ -962,17 +987,7 @@ func (c *ekmRESTClient) CreateEkmConnection(ctx context.Context, req *kmspb.Crea
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateEkmConnection")
if err != nil {
return err
}
@@ -1007,11 +1022,11 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda
params := url.Values{}
params.Add("$alt", "json;enum-encoding=int")
if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
+ field, err := protojson.Marshal(req.GetUpdateMask())
if err != nil {
return nil, err
}
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+ params.Add("updateMask", string(field[1:len(field)-1]))
}
baseUrl.RawQuery = params.Encode()
@@ -1036,17 +1051,7 @@ func (c *ekmRESTClient) UpdateEkmConnection(ctx context.Context, req *kmspb.Upda
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConnection")
if err != nil {
return err
}
@@ -1097,17 +1102,7 @@ func (c *ekmRESTClient) GetEkmConfig(ctx context.Context, req *kmspb.GetEkmConfi
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetEkmConfig")
if err != nil {
return err
}
@@ -1143,11 +1138,11 @@ func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk
params := url.Values{}
params.Add("$alt", "json;enum-encoding=int")
if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
+ field, err := protojson.Marshal(req.GetUpdateMask())
if err != nil {
return nil, err
}
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+ params.Add("updateMask", string(field[1:len(field)-1]))
}
baseUrl.RawQuery = params.Encode()
@@ -1172,17 +1167,7 @@ func (c *ekmRESTClient) UpdateEkmConfig(ctx context.Context, req *kmspb.UpdateEk
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateEkmConfig")
if err != nil {
return err
}
@@ -1236,17 +1221,7 @@ func (c *ekmRESTClient) VerifyConnectivity(ctx context.Context, req *kmspb.Verif
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "VerifyConnectivity")
if err != nil {
return err
}
@@ -1296,17 +1271,7 @@ func (c *ekmRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLoca
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
if err != nil {
return err
}
@@ -1371,21 +1336,10 @@ func (c *ekmRESTClient) ListLocations(ctx context.Context, req *locationpb.ListL
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -1452,17 +1406,7 @@ func (c *ekmRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolic
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
if err != nil {
return err
}
@@ -1522,17 +1466,7 @@ func (c *ekmRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolic
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
if err != nil {
return err
}
@@ -1594,17 +1528,7 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
if err != nil {
return err
}
@@ -1621,96 +1545,52 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
return resp, nil
}
-// EkmConnectionIterator manages a stream of *kmspb.EkmConnection.
-type EkmConnectionIterator struct {
- items []*kmspb.EkmConnection
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.EkmConnection, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *EkmConnectionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *EkmConnectionIterator) Next() (*kmspb.EkmConnection, error) {
- var item *kmspb.EkmConnection
- if err := it.nextFunc(); err != nil {
- return item, err
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *ekmRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
}
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-func (it *EkmConnectionIterator) bufLen() int {
- return len(it.items)
-}
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
-func (it *EkmConnectionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
+ baseUrl.RawQuery = params.Encode()
-// LocationIterator manages a stream of *locationpb.Location.
-type LocationIterator struct {
- items []*locationpb.Location
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*locationpb.Location, nextPageToken string, err error)
-}
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *LocationIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *LocationIterator) Next() (*locationpb.Location, error) {
- var item *locationpb.Location
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+ if err != nil {
+ return err
+ }
-func (it *LocationIterator) bufLen() int {
- return len(it.items)
-}
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
-func (it *LocationIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json
new file mode 100644
index 000000000..47b250252
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json
@@ -0,0 +1,715 @@
+{
+ "schema": "1.0",
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
+ "language": "go",
+ "protoPackage": "google.cloud.kms.v1",
+ "libraryPackage": "cloud.google.com/go/kms/apiv1",
+ "services": {
+ "Autokey": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "AutokeyClient",
+ "rpcs": {
+ "CreateKeyHandle": {
+ "methods": [
+ "CreateKeyHandle"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetKeyHandle": {
+ "methods": [
+ "GetKeyHandle"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListKeyHandles": {
+ "methods": [
+ "ListKeyHandles"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "AutokeyClient",
+ "rpcs": {
+ "CreateKeyHandle": {
+ "methods": [
+ "CreateKeyHandle"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetKeyHandle": {
+ "methods": [
+ "GetKeyHandle"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListKeyHandles": {
+ "methods": [
+ "ListKeyHandles"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "AutokeyAdmin": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "AutokeyAdminClient",
+ "rpcs": {
+ "GetAutokeyConfig": {
+ "methods": [
+ "GetAutokeyConfig"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "ShowEffectiveAutokeyConfig": {
+ "methods": [
+ "ShowEffectiveAutokeyConfig"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateAutokeyConfig": {
+ "methods": [
+ "UpdateAutokeyConfig"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "AutokeyAdminClient",
+ "rpcs": {
+ "GetAutokeyConfig": {
+ "methods": [
+ "GetAutokeyConfig"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "ShowEffectiveAutokeyConfig": {
+ "methods": [
+ "ShowEffectiveAutokeyConfig"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateAutokeyConfig": {
+ "methods": [
+ "UpdateAutokeyConfig"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "EkmService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "EkmClient",
+ "rpcs": {
+ "CreateEkmConnection": {
+ "methods": [
+ "CreateEkmConnection"
+ ]
+ },
+ "GetEkmConfig": {
+ "methods": [
+ "GetEkmConfig"
+ ]
+ },
+ "GetEkmConnection": {
+ "methods": [
+ "GetEkmConnection"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListEkmConnections": {
+ "methods": [
+ "ListEkmConnections"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateEkmConfig": {
+ "methods": [
+ "UpdateEkmConfig"
+ ]
+ },
+ "UpdateEkmConnection": {
+ "methods": [
+ "UpdateEkmConnection"
+ ]
+ },
+ "VerifyConnectivity": {
+ "methods": [
+ "VerifyConnectivity"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "EkmClient",
+ "rpcs": {
+ "CreateEkmConnection": {
+ "methods": [
+ "CreateEkmConnection"
+ ]
+ },
+ "GetEkmConfig": {
+ "methods": [
+ "GetEkmConfig"
+ ]
+ },
+ "GetEkmConnection": {
+ "methods": [
+ "GetEkmConnection"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListEkmConnections": {
+ "methods": [
+ "ListEkmConnections"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateEkmConfig": {
+ "methods": [
+ "UpdateEkmConfig"
+ ]
+ },
+ "UpdateEkmConnection": {
+ "methods": [
+ "UpdateEkmConnection"
+ ]
+ },
+ "VerifyConnectivity": {
+ "methods": [
+ "VerifyConnectivity"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "KeyManagementService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "KeyManagementClient",
+ "rpcs": {
+ "AsymmetricDecrypt": {
+ "methods": [
+ "AsymmetricDecrypt"
+ ]
+ },
+ "AsymmetricSign": {
+ "methods": [
+ "AsymmetricSign"
+ ]
+ },
+ "CreateCryptoKey": {
+ "methods": [
+ "CreateCryptoKey"
+ ]
+ },
+ "CreateCryptoKeyVersion": {
+ "methods": [
+ "CreateCryptoKeyVersion"
+ ]
+ },
+ "CreateImportJob": {
+ "methods": [
+ "CreateImportJob"
+ ]
+ },
+ "CreateKeyRing": {
+ "methods": [
+ "CreateKeyRing"
+ ]
+ },
+ "Decrypt": {
+ "methods": [
+ "Decrypt"
+ ]
+ },
+ "DestroyCryptoKeyVersion": {
+ "methods": [
+ "DestroyCryptoKeyVersion"
+ ]
+ },
+ "Encrypt": {
+ "methods": [
+ "Encrypt"
+ ]
+ },
+ "GenerateRandomBytes": {
+ "methods": [
+ "GenerateRandomBytes"
+ ]
+ },
+ "GetCryptoKey": {
+ "methods": [
+ "GetCryptoKey"
+ ]
+ },
+ "GetCryptoKeyVersion": {
+ "methods": [
+ "GetCryptoKeyVersion"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetImportJob": {
+ "methods": [
+ "GetImportJob"
+ ]
+ },
+ "GetKeyRing": {
+ "methods": [
+ "GetKeyRing"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "GetPublicKey": {
+ "methods": [
+ "GetPublicKey"
+ ]
+ },
+ "ImportCryptoKeyVersion": {
+ "methods": [
+ "ImportCryptoKeyVersion"
+ ]
+ },
+ "ListCryptoKeyVersions": {
+ "methods": [
+ "ListCryptoKeyVersions"
+ ]
+ },
+ "ListCryptoKeys": {
+ "methods": [
+ "ListCryptoKeys"
+ ]
+ },
+ "ListImportJobs": {
+ "methods": [
+ "ListImportJobs"
+ ]
+ },
+ "ListKeyRings": {
+ "methods": [
+ "ListKeyRings"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "MacSign": {
+ "methods": [
+ "MacSign"
+ ]
+ },
+ "MacVerify": {
+ "methods": [
+ "MacVerify"
+ ]
+ },
+ "RawDecrypt": {
+ "methods": [
+ "RawDecrypt"
+ ]
+ },
+ "RawEncrypt": {
+ "methods": [
+ "RawEncrypt"
+ ]
+ },
+ "RestoreCryptoKeyVersion": {
+ "methods": [
+ "RestoreCryptoKeyVersion"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateCryptoKey": {
+ "methods": [
+ "UpdateCryptoKey"
+ ]
+ },
+ "UpdateCryptoKeyPrimaryVersion": {
+ "methods": [
+ "UpdateCryptoKeyPrimaryVersion"
+ ]
+ },
+ "UpdateCryptoKeyVersion": {
+ "methods": [
+ "UpdateCryptoKeyVersion"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "KeyManagementClient",
+ "rpcs": {
+ "AsymmetricDecrypt": {
+ "methods": [
+ "AsymmetricDecrypt"
+ ]
+ },
+ "AsymmetricSign": {
+ "methods": [
+ "AsymmetricSign"
+ ]
+ },
+ "CreateCryptoKey": {
+ "methods": [
+ "CreateCryptoKey"
+ ]
+ },
+ "CreateCryptoKeyVersion": {
+ "methods": [
+ "CreateCryptoKeyVersion"
+ ]
+ },
+ "CreateImportJob": {
+ "methods": [
+ "CreateImportJob"
+ ]
+ },
+ "CreateKeyRing": {
+ "methods": [
+ "CreateKeyRing"
+ ]
+ },
+ "Decrypt": {
+ "methods": [
+ "Decrypt"
+ ]
+ },
+ "DestroyCryptoKeyVersion": {
+ "methods": [
+ "DestroyCryptoKeyVersion"
+ ]
+ },
+ "Encrypt": {
+ "methods": [
+ "Encrypt"
+ ]
+ },
+ "GenerateRandomBytes": {
+ "methods": [
+ "GenerateRandomBytes"
+ ]
+ },
+ "GetCryptoKey": {
+ "methods": [
+ "GetCryptoKey"
+ ]
+ },
+ "GetCryptoKeyVersion": {
+ "methods": [
+ "GetCryptoKeyVersion"
+ ]
+ },
+ "GetIamPolicy": {
+ "methods": [
+ "GetIamPolicy"
+ ]
+ },
+ "GetImportJob": {
+ "methods": [
+ "GetImportJob"
+ ]
+ },
+ "GetKeyRing": {
+ "methods": [
+ "GetKeyRing"
+ ]
+ },
+ "GetLocation": {
+ "methods": [
+ "GetLocation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "GetPublicKey": {
+ "methods": [
+ "GetPublicKey"
+ ]
+ },
+ "ImportCryptoKeyVersion": {
+ "methods": [
+ "ImportCryptoKeyVersion"
+ ]
+ },
+ "ListCryptoKeyVersions": {
+ "methods": [
+ "ListCryptoKeyVersions"
+ ]
+ },
+ "ListCryptoKeys": {
+ "methods": [
+ "ListCryptoKeys"
+ ]
+ },
+ "ListImportJobs": {
+ "methods": [
+ "ListImportJobs"
+ ]
+ },
+ "ListKeyRings": {
+ "methods": [
+ "ListKeyRings"
+ ]
+ },
+ "ListLocations": {
+ "methods": [
+ "ListLocations"
+ ]
+ },
+ "MacSign": {
+ "methods": [
+ "MacSign"
+ ]
+ },
+ "MacVerify": {
+ "methods": [
+ "MacVerify"
+ ]
+ },
+ "RawDecrypt": {
+ "methods": [
+ "RawDecrypt"
+ ]
+ },
+ "RawEncrypt": {
+ "methods": [
+ "RawEncrypt"
+ ]
+ },
+ "RestoreCryptoKeyVersion": {
+ "methods": [
+ "RestoreCryptoKeyVersion"
+ ]
+ },
+ "SetIamPolicy": {
+ "methods": [
+ "SetIamPolicy"
+ ]
+ },
+ "TestIamPermissions": {
+ "methods": [
+ "TestIamPermissions"
+ ]
+ },
+ "UpdateCryptoKey": {
+ "methods": [
+ "UpdateCryptoKey"
+ ]
+ },
+ "UpdateCryptoKeyPrimaryVersion": {
+ "methods": [
+ "UpdateCryptoKeyPrimaryVersion"
+ ]
+ },
+ "UpdateCryptoKeyVersion": {
+ "methods": [
+ "UpdateCryptoKeyVersion"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/helpers.go b/vendor/cloud.google.com/go/kms/apiv1/helpers.go
new file mode 100644
index 000000000..afdc9d080
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/helpers.go
@@ -0,0 +1,102 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ "io"
+ "log/slog"
+ "net/http"
+
+ "github.com/googleapis/gax-go/v2/internallog"
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "cloudkms.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloudkms",
+ }
+}
+
+func executeHTTPRequestWithResponse(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, *http.Response, error) {
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body))
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, buf))
+ if err = googleapi.CheckResponseWithBody(resp, buf); err != nil {
+ return nil, nil, err
+ }
+ return buf, resp, nil
+}
+
+func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, error) {
+ buf, _, err := executeHTTPRequestWithResponse(ctx, client, req, logger, body, rpc)
+ return buf, err
+}
+
+func executeStreamingHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) (*http.Response, error) {
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body))
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, nil))
+ if err = googleapi.CheckResponse(resp); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
index 92293dc29..2c99b71de 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import (
"bytes"
"context"
"fmt"
- "io"
+ "log/slog"
"math"
"net/http"
"net/url"
@@ -28,8 +28,8 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
kmspb "cloud.google.com/go/kms/apiv1/kmspb"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
@@ -79,15 +79,19 @@ type KeyManagementCallOptions struct {
GetIamPolicy []gax.CallOption
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
+ GetOperation []gax.CallOption
}
func defaultKeyManagementGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -420,6 +424,7 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
}
}
@@ -726,6 +731,7 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
}
}
@@ -767,6 +773,7 @@ type internalKeyManagementClient interface {
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
}
// KeyManagementClient is a client for interacting with Cloud Key Management Service (KMS) API.
@@ -1096,6 +1103,11 @@ func (c *KeyManagementClient) TestIamPermissions(ctx context.Context, req *iampb
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *KeyManagementClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
// keyManagementGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -1109,12 +1121,16 @@ type keyManagementGRPCClient struct {
// The gRPC API client.
keyManagementClient kmspb.KeyManagementServiceClient
+ operationsClient longrunningpb.OperationsClient
+
iamPolicyClient iampb.IAMPolicyClient
locationsClient locationpb.LocationsClient
// The x-goog-* metadata to be sent with each request.
xGoogHeaders []string
+
+ logger *slog.Logger
}
// NewKeyManagementClient creates a new key management service client based on gRPC.
@@ -1155,6 +1171,8 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (*
connPool: connPool,
keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool),
CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
locationsClient: locationpb.NewLocationsClient(connPool),
}
@@ -1179,7 +1197,9 @@ func (c *keyManagementGRPCClient) Connection() *grpc.ClientConn {
func (c *keyManagementGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -1201,6 +1221,8 @@ type keyManagementRESTClient struct {
// Points back to the CallOptions field of the containing KeyManagementClient
CallOptions **KeyManagementCallOptions
+
+ logger *slog.Logger
}
// NewKeyManagementRESTClient creates a new key management service rest client.
@@ -1232,6 +1254,7 @@ func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption
endpoint: endpoint,
httpClient: httpClient,
CallOptions: &callOpts,
+ logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
@@ -1241,9 +1264,12 @@ func NewKeyManagementRESTClient(ctx context.Context, opts ...option.ClientOption
func defaultKeyManagementRESTClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
}
}
@@ -1253,7 +1279,9 @@ func defaultKeyManagementRESTClientOptions() []option.ClientOption {
func (c *keyManagementRESTClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -1290,7 +1318,7 @@ func (c *keyManagementGRPCClient) ListKeyRings(ctx context.Context, req *kmspb.L
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.ListKeyRings(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.ListKeyRings, req, settings.GRPC, c.logger, "ListKeyRings")
return err
}, opts...)
if err != nil {
@@ -1336,7 +1364,7 @@ func (c *keyManagementGRPCClient) ListCryptoKeys(ctx context.Context, req *kmspb
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.ListCryptoKeys(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeys, req, settings.GRPC, c.logger, "ListCryptoKeys")
return err
}, opts...)
if err != nil {
@@ -1382,7 +1410,7 @@ func (c *keyManagementGRPCClient) ListCryptoKeyVersions(ctx context.Context, req
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.ListCryptoKeyVersions(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.ListCryptoKeyVersions, req, settings.GRPC, c.logger, "ListCryptoKeyVersions")
return err
}, opts...)
if err != nil {
@@ -1428,7 +1456,7 @@ func (c *keyManagementGRPCClient) ListImportJobs(ctx context.Context, req *kmspb
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.ListImportJobs(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.ListImportJobs, req, settings.GRPC, c.logger, "ListImportJobs")
return err
}, opts...)
if err != nil {
@@ -1463,7 +1491,7 @@ func (c *keyManagementGRPCClient) GetKeyRing(ctx context.Context, req *kmspb.Get
var resp *kmspb.KeyRing
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GetKeyRing(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GetKeyRing, req, settings.GRPC, c.logger, "GetKeyRing")
return err
}, opts...)
if err != nil {
@@ -1481,7 +1509,7 @@ func (c *keyManagementGRPCClient) GetCryptoKey(ctx context.Context, req *kmspb.G
var resp *kmspb.CryptoKey
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GetCryptoKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKey, req, settings.GRPC, c.logger, "GetCryptoKey")
return err
}, opts...)
if err != nil {
@@ -1499,7 +1527,7 @@ func (c *keyManagementGRPCClient) GetCryptoKeyVersion(ctx context.Context, req *
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GetCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GetCryptoKeyVersion, req, settings.GRPC, c.logger, "GetCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1517,7 +1545,7 @@ func (c *keyManagementGRPCClient) GetPublicKey(ctx context.Context, req *kmspb.G
var resp *kmspb.PublicKey
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GetPublicKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GetPublicKey, req, settings.GRPC, c.logger, "GetPublicKey")
return err
}, opts...)
if err != nil {
@@ -1535,7 +1563,7 @@ func (c *keyManagementGRPCClient) GetImportJob(ctx context.Context, req *kmspb.G
var resp *kmspb.ImportJob
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GetImportJob(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GetImportJob, req, settings.GRPC, c.logger, "GetImportJob")
return err
}, opts...)
if err != nil {
@@ -1553,7 +1581,7 @@ func (c *keyManagementGRPCClient) CreateKeyRing(ctx context.Context, req *kmspb.
var resp *kmspb.KeyRing
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.CreateKeyRing(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.CreateKeyRing, req, settings.GRPC, c.logger, "CreateKeyRing")
return err
}, opts...)
if err != nil {
@@ -1571,7 +1599,7 @@ func (c *keyManagementGRPCClient) CreateCryptoKey(ctx context.Context, req *kmsp
var resp *kmspb.CryptoKey
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.CreateCryptoKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKey, req, settings.GRPC, c.logger, "CreateCryptoKey")
return err
}, opts...)
if err != nil {
@@ -1589,7 +1617,7 @@ func (c *keyManagementGRPCClient) CreateCryptoKeyVersion(ctx context.Context, re
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.CreateCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.CreateCryptoKeyVersion, req, settings.GRPC, c.logger, "CreateCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1607,7 +1635,7 @@ func (c *keyManagementGRPCClient) ImportCryptoKeyVersion(ctx context.Context, re
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.ImportCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.ImportCryptoKeyVersion, req, settings.GRPC, c.logger, "ImportCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1625,7 +1653,7 @@ func (c *keyManagementGRPCClient) CreateImportJob(ctx context.Context, req *kmsp
var resp *kmspb.ImportJob
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.CreateImportJob(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.CreateImportJob, req, settings.GRPC, c.logger, "CreateImportJob")
return err
}, opts...)
if err != nil {
@@ -1643,7 +1671,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKey(ctx context.Context, req *kmsp
var resp *kmspb.CryptoKey
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.UpdateCryptoKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKey, req, settings.GRPC, c.logger, "UpdateCryptoKey")
return err
}, opts...)
if err != nil {
@@ -1661,7 +1689,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyVersion(ctx context.Context, re
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.UpdateCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1679,7 +1707,7 @@ func (c *keyManagementGRPCClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont
var resp *kmspb.CryptoKey
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.UpdateCryptoKeyPrimaryVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.UpdateCryptoKeyPrimaryVersion, req, settings.GRPC, c.logger, "UpdateCryptoKeyPrimaryVersion")
return err
}, opts...)
if err != nil {
@@ -1697,7 +1725,7 @@ func (c *keyManagementGRPCClient) DestroyCryptoKeyVersion(ctx context.Context, r
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.DestroyCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.DestroyCryptoKeyVersion, req, settings.GRPC, c.logger, "DestroyCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1715,7 +1743,7 @@ func (c *keyManagementGRPCClient) RestoreCryptoKeyVersion(ctx context.Context, r
var resp *kmspb.CryptoKeyVersion
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.RestoreCryptoKeyVersion(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.RestoreCryptoKeyVersion, req, settings.GRPC, c.logger, "RestoreCryptoKeyVersion")
return err
}, opts...)
if err != nil {
@@ -1733,7 +1761,7 @@ func (c *keyManagementGRPCClient) Encrypt(ctx context.Context, req *kmspb.Encryp
var resp *kmspb.EncryptResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.Encrypt(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.Encrypt, req, settings.GRPC, c.logger, "Encrypt")
return err
}, opts...)
if err != nil {
@@ -1751,7 +1779,7 @@ func (c *keyManagementGRPCClient) Decrypt(ctx context.Context, req *kmspb.Decryp
var resp *kmspb.DecryptResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.Decrypt(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.Decrypt, req, settings.GRPC, c.logger, "Decrypt")
return err
}, opts...)
if err != nil {
@@ -1769,7 +1797,7 @@ func (c *keyManagementGRPCClient) RawEncrypt(ctx context.Context, req *kmspb.Raw
var resp *kmspb.RawEncryptResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.RawEncrypt(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.RawEncrypt, req, settings.GRPC, c.logger, "RawEncrypt")
return err
}, opts...)
if err != nil {
@@ -1787,7 +1815,7 @@ func (c *keyManagementGRPCClient) RawDecrypt(ctx context.Context, req *kmspb.Raw
var resp *kmspb.RawDecryptResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.RawDecrypt(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.RawDecrypt, req, settings.GRPC, c.logger, "RawDecrypt")
return err
}, opts...)
if err != nil {
@@ -1805,7 +1833,7 @@ func (c *keyManagementGRPCClient) AsymmetricSign(ctx context.Context, req *kmspb
var resp *kmspb.AsymmetricSignResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.AsymmetricSign(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricSign, req, settings.GRPC, c.logger, "AsymmetricSign")
return err
}, opts...)
if err != nil {
@@ -1823,7 +1851,7 @@ func (c *keyManagementGRPCClient) AsymmetricDecrypt(ctx context.Context, req *km
var resp *kmspb.AsymmetricDecryptResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.AsymmetricDecrypt(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.AsymmetricDecrypt, req, settings.GRPC, c.logger, "AsymmetricDecrypt")
return err
}, opts...)
if err != nil {
@@ -1841,7 +1869,7 @@ func (c *keyManagementGRPCClient) MacSign(ctx context.Context, req *kmspb.MacSig
var resp *kmspb.MacSignResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.MacSign(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.MacSign, req, settings.GRPC, c.logger, "MacSign")
return err
}, opts...)
if err != nil {
@@ -1859,7 +1887,7 @@ func (c *keyManagementGRPCClient) MacVerify(ctx context.Context, req *kmspb.MacV
var resp *kmspb.MacVerifyResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.MacVerify(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.MacVerify, req, settings.GRPC, c.logger, "MacVerify")
return err
}, opts...)
if err != nil {
@@ -1877,7 +1905,7 @@ func (c *keyManagementGRPCClient) GenerateRandomBytes(ctx context.Context, req *
var resp *kmspb.GenerateRandomBytesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.keyManagementClient.GenerateRandomBytes(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.keyManagementClient.GenerateRandomBytes, req, settings.GRPC, c.logger, "GenerateRandomBytes")
return err
}, opts...)
if err != nil {
@@ -1895,7 +1923,7 @@ func (c *keyManagementGRPCClient) GetLocation(ctx context.Context, req *location
var resp *locationpb.Location
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.locationsClient.GetLocation, req, settings.GRPC, c.logger, "GetLocation")
return err
}, opts...)
if err != nil {
@@ -1924,7 +1952,7 @@ func (c *keyManagementGRPCClient) ListLocations(ctx context.Context, req *locati
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.locationsClient.ListLocations, req, settings.GRPC, c.logger, "ListLocations")
return err
}, opts...)
if err != nil {
@@ -1959,7 +1987,7 @@ func (c *keyManagementGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.G
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1977,7 +2005,7 @@ func (c *keyManagementGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.S
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1995,7 +2023,25 @@ func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *i
var resp *iampb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.iamPolicyClient.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *keyManagementGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
return err
}, opts...)
if err != nil {
@@ -2055,21 +2101,10 @@ func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.L
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListKeyRings")
if err != nil {
return err
}
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -2153,21 +2188,10 @@ func (c *keyManagementRESTClient) ListCryptoKeys(ctx context.Context, req *kmspb
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeys")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -2251,21 +2275,10 @@ func (c *keyManagementRESTClient) ListCryptoKeyVersions(ctx context.Context, req
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListCryptoKeyVersions")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -2346,21 +2359,10 @@ func (c *keyManagementRESTClient) ListImportJobs(ctx context.Context, req *kmspb
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListImportJobs")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -2423,17 +2425,7 @@ func (c *keyManagementRESTClient) GetKeyRing(ctx context.Context, req *kmspb.Get
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetKeyRing")
if err != nil {
return err
}
@@ -2485,17 +2477,7 @@ func (c *keyManagementRESTClient) GetCryptoKey(ctx context.Context, req *kmspb.G
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKey")
if err != nil {
return err
}
@@ -2546,17 +2528,7 @@ func (c *keyManagementRESTClient) GetCryptoKeyVersion(ctx context.Context, req *
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetCryptoKeyVersion")
if err != nil {
return err
}
@@ -2588,6 +2560,9 @@ func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.G
params := url.Values{}
params.Add("$alt", "json;enum-encoding=int")
+ if req.GetPublicKeyFormat() != 0 {
+ params.Add("publicKeyFormat", fmt.Sprintf("%v", req.GetPublicKeyFormat()))
+ }
baseUrl.RawQuery = params.Encode()
@@ -2611,17 +2586,7 @@ func (c *keyManagementRESTClient) GetPublicKey(ctx context.Context, req *kmspb.G
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetPublicKey")
if err != nil {
return err
}
@@ -2671,17 +2636,7 @@ func (c *keyManagementRESTClient) GetImportJob(ctx context.Context, req *kmspb.G
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetImportJob")
if err != nil {
return err
}
@@ -2740,17 +2695,7 @@ func (c *keyManagementRESTClient) CreateKeyRing(ctx context.Context, req *kmspb.
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateKeyRing")
if err != nil {
return err
}
@@ -2816,17 +2761,7 @@ func (c *keyManagementRESTClient) CreateCryptoKey(ctx context.Context, req *kmsp
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKey")
if err != nil {
return err
}
@@ -2888,17 +2823,7 @@ func (c *keyManagementRESTClient) CreateCryptoKeyVersion(ctx context.Context, re
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateCryptoKeyVersion")
if err != nil {
return err
}
@@ -2961,17 +2886,7 @@ func (c *keyManagementRESTClient) ImportCryptoKeyVersion(ctx context.Context, re
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "ImportCryptoKeyVersion")
if err != nil {
return err
}
@@ -3033,17 +2948,7 @@ func (c *keyManagementRESTClient) CreateImportJob(ctx context.Context, req *kmsp
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CreateImportJob")
if err != nil {
return err
}
@@ -3078,11 +2983,11 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp
params := url.Values{}
params.Add("$alt", "json;enum-encoding=int")
if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
+ field, err := protojson.Marshal(req.GetUpdateMask())
if err != nil {
return nil, err
}
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+ params.Add("updateMask", string(field[1:len(field)-1]))
}
baseUrl.RawQuery = params.Encode()
@@ -3107,17 +3012,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKey(ctx context.Context, req *kmsp
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKey")
if err != nil {
return err
}
@@ -3163,11 +3058,11 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re
params := url.Values{}
params.Add("$alt", "json;enum-encoding=int")
if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
+ field, err := protojson.Marshal(req.GetUpdateMask())
if err != nil {
return nil, err
}
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+ params.Add("updateMask", string(field[1:len(field)-1]))
}
baseUrl.RawQuery = params.Encode()
@@ -3192,17 +3087,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyVersion(ctx context.Context, re
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyVersion")
if err != nil {
return err
}
@@ -3263,17 +3148,7 @@ func (c *keyManagementRESTClient) UpdateCryptoKeyPrimaryVersion(ctx context.Cont
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "UpdateCryptoKeyPrimaryVersion")
if err != nil {
return err
}
@@ -3349,17 +3224,7 @@ func (c *keyManagementRESTClient) DestroyCryptoKeyVersion(ctx context.Context, r
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "DestroyCryptoKeyVersion")
if err != nil {
return err
}
@@ -3423,17 +3288,7 @@ func (c *keyManagementRESTClient) RestoreCryptoKeyVersion(ctx context.Context, r
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RestoreCryptoKeyVersion")
if err != nil {
return err
}
@@ -3492,17 +3347,7 @@ func (c *keyManagementRESTClient) Encrypt(ctx context.Context, req *kmspb.Encryp
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Encrypt")
if err != nil {
return err
}
@@ -3561,17 +3406,7 @@ func (c *keyManagementRESTClient) Decrypt(ctx context.Context, req *kmspb.Decryp
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "Decrypt")
if err != nil {
return err
}
@@ -3632,17 +3467,7 @@ func (c *keyManagementRESTClient) RawEncrypt(ctx context.Context, req *kmspb.Raw
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawEncrypt")
if err != nil {
return err
}
@@ -3701,17 +3526,7 @@ func (c *keyManagementRESTClient) RawDecrypt(ctx context.Context, req *kmspb.Raw
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "RawDecrypt")
if err != nil {
return err
}
@@ -3771,17 +3586,7 @@ func (c *keyManagementRESTClient) AsymmetricSign(ctx context.Context, req *kmspb
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricSign")
if err != nil {
return err
}
@@ -3841,17 +3646,7 @@ func (c *keyManagementRESTClient) AsymmetricDecrypt(ctx context.Context, req *km
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "AsymmetricDecrypt")
if err != nil {
return err
}
@@ -3909,17 +3704,7 @@ func (c *keyManagementRESTClient) MacSign(ctx context.Context, req *kmspb.MacSig
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacSign")
if err != nil {
return err
}
@@ -3978,17 +3763,7 @@ func (c *keyManagementRESTClient) MacVerify(ctx context.Context, req *kmspb.MacV
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "MacVerify")
if err != nil {
return err
}
@@ -4045,17 +3820,7 @@ func (c *keyManagementRESTClient) GenerateRandomBytes(ctx context.Context, req *
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "GenerateRandomBytes")
if err != nil {
return err
}
@@ -4105,17 +3870,7 @@ func (c *keyManagementRESTClient) GetLocation(ctx context.Context, req *location
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetLocation")
if err != nil {
return err
}
@@ -4180,21 +3935,10 @@ func (c *keyManagementRESTClient) ListLocations(ctx context.Context, req *locati
}
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListLocations")
if err != nil {
return err
}
-
if err := unm.Unmarshal(buf, resp); err != nil {
return err
}
@@ -4261,17 +4005,7 @@ func (c *keyManagementRESTClient) GetIamPolicy(ctx context.Context, req *iampb.G
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetIamPolicy")
if err != nil {
return err
}
@@ -4331,17 +4065,7 @@ func (c *keyManagementRESTClient) SetIamPolicy(ctx context.Context, req *iampb.S
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "SetIamPolicy")
if err != nil {
return err
}
@@ -4403,17 +4127,7 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i
httpReq = httpReq.WithContext(ctx)
httpReq.Header = headers
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "TestIamPermissions")
if err != nil {
return err
}
@@ -4430,190 +4144,52 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i
return resp, nil
}
-// CryptoKeyIterator manages a stream of *kmspb.CryptoKey.
-type CryptoKeyIterator struct {
- items []*kmspb.CryptoKey
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKey, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *CryptoKeyIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *CryptoKeyIterator) Next() (*kmspb.CryptoKey, error) {
- var item *kmspb.CryptoKey
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *CryptoKeyIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *CryptoKeyIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// CryptoKeyVersionIterator manages a stream of *kmspb.CryptoKeyVersion.
-type CryptoKeyVersionIterator struct {
- items []*kmspb.CryptoKeyVersion
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.CryptoKeyVersion, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *CryptoKeyVersionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *CryptoKeyVersionIterator) Next() (*kmspb.CryptoKeyVersion, error) {
- var item *kmspb.CryptoKeyVersion
- if err := it.nextFunc(); err != nil {
- return item, err
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *keyManagementRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
}
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *CryptoKeyVersionIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *CryptoKeyVersionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// ImportJobIterator manages a stream of *kmspb.ImportJob.
-type ImportJobIterator struct {
- items []*kmspb.ImportJob
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.ImportJob, nextPageToken string, err error)
-}
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *ImportJobIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *ImportJobIterator) Next() (*kmspb.ImportJob, error) {
- var item *kmspb.ImportJob
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
+ baseUrl.RawQuery = params.Encode()
-func (it *ImportJobIterator) bufLen() int {
- return len(it.items)
-}
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-func (it *ImportJobIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
-// KeyRingIterator manages a stream of *kmspb.KeyRing.
-type KeyRingIterator struct {
- items []*kmspb.KeyRing
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*kmspb.KeyRing, nextPageToken string, err error)
-}
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+ if err != nil {
+ return err
+ }
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *KeyRingIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *KeyRingIterator) Next() (*kmspb.KeyRing, error) {
- var item *kmspb.KeyRing
- if err := it.nextFunc(); err != nil {
- return item, err
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
}
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *KeyRingIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *KeyRingIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
+ return resp, nil
}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go
new file mode 100644
index 000000000..e4737aca9
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go
@@ -0,0 +1,786 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/cloud/kms/v1/autokey.proto
+
+package kmspb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Request message for
+// [Autokey.CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle].
+type CreateKeyHandleRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the resource project and location to create the
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] in, e.g.
+ // `projects/{PROJECT_ID}/locations/{LOCATION}`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. Id of the [KeyHandle][google.cloud.kms.v1.KeyHandle]. Must be
+ // unique to the resource project and location. If not provided by the caller,
+ // a new UUID is used.
+ KeyHandleId string `protobuf:"bytes,2,opt,name=key_handle_id,json=keyHandleId,proto3" json:"key_handle_id,omitempty"`
+ // Required. [KeyHandle][google.cloud.kms.v1.KeyHandle] to create.
+ KeyHandle *KeyHandle `protobuf:"bytes,3,opt,name=key_handle,json=keyHandle,proto3" json:"key_handle,omitempty"`
+}
+
+func (x *CreateKeyHandleRequest) Reset() {
+ *x = CreateKeyHandleRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateKeyHandleRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateKeyHandleRequest) ProtoMessage() {}
+
+func (x *CreateKeyHandleRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateKeyHandleRequest.ProtoReflect.Descriptor instead.
+func (*CreateKeyHandleRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateKeyHandleRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateKeyHandleRequest) GetKeyHandleId() string {
+ if x != nil {
+ return x.KeyHandleId
+ }
+ return ""
+}
+
+func (x *CreateKeyHandleRequest) GetKeyHandle() *KeyHandle {
+ if x != nil {
+ return x.KeyHandle
+ }
+ return nil
+}
+
+// Request message for [GetKeyHandle][google.cloud.kms.v1.Autokey.GetKeyHandle].
+type GetKeyHandleRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle] resource,
+ // e.g.
+ // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetKeyHandleRequest) Reset() {
+ *x = GetKeyHandleRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetKeyHandleRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetKeyHandleRequest) ProtoMessage() {}
+
+func (x *GetKeyHandleRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetKeyHandleRequest.ProtoReflect.Descriptor instead.
+func (*GetKeyHandleRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetKeyHandleRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// Resource-oriented representation of a request to Cloud KMS Autokey and the
+// resulting provisioning of a [CryptoKey][google.cloud.kms.v1.CryptoKey].
+type KeyHandle struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle]
+ // resource, e.g.
+ // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Output only. Name of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that has
+ // been provisioned for Customer Managed Encryption Key (CMEK) use in the
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] project and location for the
+ // requested resource type. The [CryptoKey][google.cloud.kms.v1.CryptoKey]
+ // project will reflect the value configured in the
+ // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] on the resource
+ // project's ancestor folder at the time of the
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation. If more than one
+ // ancestor folder has a configured
+ // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig], the nearest of these
+ // configurations is used.
+ KmsKey string `protobuf:"bytes,3,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
+ // Required. Indicates the resource type that the resulting
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey] is meant to protect, e.g.
+ // `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource
+ // types.
+ ResourceTypeSelector string `protobuf:"bytes,4,opt,name=resource_type_selector,json=resourceTypeSelector,proto3" json:"resource_type_selector,omitempty"`
+}
+
+func (x *KeyHandle) Reset() {
+ *x = KeyHandle{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *KeyHandle) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyHandle) ProtoMessage() {}
+
+func (x *KeyHandle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyHandle.ProtoReflect.Descriptor instead.
+func (*KeyHandle) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KeyHandle) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *KeyHandle) GetKmsKey() string {
+ if x != nil {
+ return x.KmsKey
+ }
+ return ""
+}
+
+func (x *KeyHandle) GetResourceTypeSelector() string {
+ if x != nil {
+ return x.ResourceTypeSelector
+ }
+ return ""
+}
+
+// Metadata message for
+// [CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle] long-running
+// operation response.
+type CreateKeyHandleMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *CreateKeyHandleMetadata) Reset() {
+ *x = CreateKeyHandleMetadata{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateKeyHandleMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateKeyHandleMetadata) ProtoMessage() {}
+
+func (x *CreateKeyHandleMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateKeyHandleMetadata.ProtoReflect.Descriptor instead.
+func (*CreateKeyHandleMetadata) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{3}
+}
+
+// Request message for
+// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles].
+type ListKeyHandlesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the resource project and location from which to list
+ // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g.
+ // `projects/{PROJECT_ID}/locations/{LOCATION}`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. Optional limit on the number of
+ // [KeyHandles][google.cloud.kms.v1.KeyHandle] to include in the response. The
+ // service may return fewer than this value. Further
+ // [KeyHandles][google.cloud.kms.v1.KeyHandle] can subsequently be obtained by
+ // including the
+ // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token]
+ // in a subsequent request. If unspecified, at most 100
+ // [KeyHandles][google.cloud.kms.v1.KeyHandle] will be returned.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. Optional pagination token, returned earlier via
+ // [ListKeyHandlesResponse.next_page_token][google.cloud.kms.v1.ListKeyHandlesResponse.next_page_token].
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Optional. Filter to apply when listing
+ // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g.
+ // `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`.
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
+}
+
+func (x *ListKeyHandlesRequest) Reset() {
+ *x = ListKeyHandlesRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListKeyHandlesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListKeyHandlesRequest) ProtoMessage() {}
+
+func (x *ListKeyHandlesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListKeyHandlesRequest.ProtoReflect.Descriptor instead.
+func (*ListKeyHandlesRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListKeyHandlesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListKeyHandlesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListKeyHandlesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListKeyHandlesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+// Response message for
+// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles].
+type ListKeyHandlesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Resulting [KeyHandles][google.cloud.kms.v1.KeyHandle].
+ KeyHandles []*KeyHandle `protobuf:"bytes,1,rep,name=key_handles,json=keyHandles,proto3" json:"key_handles,omitempty"`
+ // A token to retrieve next page of results. Pass this value in
+ // [ListKeyHandlesRequest.page_token][google.cloud.kms.v1.ListKeyHandlesRequest.page_token]
+ // to retrieve the next page of results.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListKeyHandlesResponse) Reset() {
+ *x = ListKeyHandlesResponse{}
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListKeyHandlesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListKeyHandlesResponse) ProtoMessage() {}
+
+func (x *ListKeyHandlesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListKeyHandlesResponse.ProtoReflect.Descriptor instead.
+func (*ListKeyHandlesResponse) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListKeyHandlesResponse) GetKeyHandles() []*KeyHandle {
+ if x != nil {
+ return x.KeyHandles
+ }
+ return nil
+}
+
+func (x *ListKeyHandlesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+var File_google_cloud_kms_v1_autokey_proto protoreflect.FileDescriptor
+
+var file_google_cloud_kms_v1_autokey_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b,
+ 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
+ 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2f,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27,
+ 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6b, 0x65, 0x79, 0x48,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x09, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x54, 0x0a, 0x13, 0x47,
+ 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x22, 0xa3, 0x02, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12,
+ 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x03, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x16,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x53,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x7e, 0xea, 0x41, 0x7b, 0x0a, 0x21, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12,
+ 0x3f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64,
+ 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x7d,
+ 0x2a, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0x09, 0x6b, 0x65,
+ 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x22, 0xbd, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
+ 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
+ 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x22, 0x81, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a,
+ 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64,
+ 0x6c, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x26,
+ 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xb4, 0x05, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x6f, 0x6b,
+ 0x65, 0x79, 0x12, 0xeb, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
+ 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0x8b, 0x01, 0xca, 0x41, 0x24, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x12, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x1f, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x22, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73,
+ 0x12, 0x97, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xaa, 0x01, 0x0a, 0x0e, 0x4c,
+ 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
+ 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x54, 0x0a,
+ 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65,
+ 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d,
+ 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d,
+ 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_cloud_kms_v1_autokey_proto_rawDescOnce sync.Once
+ file_google_cloud_kms_v1_autokey_proto_rawDescData = file_google_cloud_kms_v1_autokey_proto_rawDesc
+)
+
+func file_google_cloud_kms_v1_autokey_proto_rawDescGZIP() []byte {
+ file_google_cloud_kms_v1_autokey_proto_rawDescOnce.Do(func() {
+ file_google_cloud_kms_v1_autokey_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_proto_rawDescData)
+ })
+ return file_google_cloud_kms_v1_autokey_proto_rawDescData
+}
+
+var file_google_cloud_kms_v1_autokey_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_google_cloud_kms_v1_autokey_proto_goTypes = []any{
+ (*CreateKeyHandleRequest)(nil), // 0: google.cloud.kms.v1.CreateKeyHandleRequest
+ (*GetKeyHandleRequest)(nil), // 1: google.cloud.kms.v1.GetKeyHandleRequest
+ (*KeyHandle)(nil), // 2: google.cloud.kms.v1.KeyHandle
+ (*CreateKeyHandleMetadata)(nil), // 3: google.cloud.kms.v1.CreateKeyHandleMetadata
+ (*ListKeyHandlesRequest)(nil), // 4: google.cloud.kms.v1.ListKeyHandlesRequest
+ (*ListKeyHandlesResponse)(nil), // 5: google.cloud.kms.v1.ListKeyHandlesResponse
+ (*longrunningpb.Operation)(nil), // 6: google.longrunning.Operation
+}
+var file_google_cloud_kms_v1_autokey_proto_depIdxs = []int32{
+ 2, // 0: google.cloud.kms.v1.CreateKeyHandleRequest.key_handle:type_name -> google.cloud.kms.v1.KeyHandle
+ 2, // 1: google.cloud.kms.v1.ListKeyHandlesResponse.key_handles:type_name -> google.cloud.kms.v1.KeyHandle
+ 0, // 2: google.cloud.kms.v1.Autokey.CreateKeyHandle:input_type -> google.cloud.kms.v1.CreateKeyHandleRequest
+ 1, // 3: google.cloud.kms.v1.Autokey.GetKeyHandle:input_type -> google.cloud.kms.v1.GetKeyHandleRequest
+ 4, // 4: google.cloud.kms.v1.Autokey.ListKeyHandles:input_type -> google.cloud.kms.v1.ListKeyHandlesRequest
+ 6, // 5: google.cloud.kms.v1.Autokey.CreateKeyHandle:output_type -> google.longrunning.Operation
+ 2, // 6: google.cloud.kms.v1.Autokey.GetKeyHandle:output_type -> google.cloud.kms.v1.KeyHandle
+ 5, // 7: google.cloud.kms.v1.Autokey.ListKeyHandles:output_type -> google.cloud.kms.v1.ListKeyHandlesResponse
+ 5, // [5:8] is the sub-list for method output_type
+ 2, // [2:5] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_cloud_kms_v1_autokey_proto_init() }
+func file_google_cloud_kms_v1_autokey_proto_init() {
+ if File_google_cloud_kms_v1_autokey_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_cloud_kms_v1_autokey_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_cloud_kms_v1_autokey_proto_goTypes,
+ DependencyIndexes: file_google_cloud_kms_v1_autokey_proto_depIdxs,
+ MessageInfos: file_google_cloud_kms_v1_autokey_proto_msgTypes,
+ }.Build()
+ File_google_cloud_kms_v1_autokey_proto = out.File
+ file_google_cloud_kms_v1_autokey_proto_rawDesc = nil
+ file_google_cloud_kms_v1_autokey_proto_goTypes = nil
+ file_google_cloud_kms_v1_autokey_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// AutokeyClient is the client API for Autokey service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AutokeyClient interface {
+ // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the
+ // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK
+ // use with the given resource type in the configured key project and the same
+ // location. [GetOperation][google.longrunning.Operations.GetOperation] should
+ // be used to resolve the resulting long-running operation and get the
+ // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey].
+ CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle].
+ GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error)
+ // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle].
+ ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error)
+}
+
+type autokeyClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewAutokeyClient(cc grpc.ClientConnInterface) AutokeyClient {
+ return &autokeyClient{cc}
+}
+
+func (c *autokeyClient) CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/CreateKeyHandle", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *autokeyClient) GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error) {
+ out := new(KeyHandle)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/GetKeyHandle", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *autokeyClient) ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error) {
+ out := new(ListKeyHandlesResponse)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/ListKeyHandles", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AutokeyServer is the server API for Autokey service.
+type AutokeyServer interface {
+ // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the
+ // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK
+ // use with the given resource type in the configured key project and the same
+ // location. [GetOperation][google.longrunning.Operations.GetOperation] should
+ // be used to resolve the resulting long-running operation and get the
+ // resulting [KeyHandle][google.cloud.kms.v1.KeyHandle] and
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey].
+ CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error)
+ // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle].
+ GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error)
+ // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle].
+ ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error)
+}
+
+// UnimplementedAutokeyServer can be embedded to have forward compatible implementations.
+type UnimplementedAutokeyServer struct {
+}
+
+func (*UnimplementedAutokeyServer) CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateKeyHandle not implemented")
+}
+func (*UnimplementedAutokeyServer) GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetKeyHandle not implemented")
+}
+func (*UnimplementedAutokeyServer) ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListKeyHandles not implemented")
+}
+
+func RegisterAutokeyServer(s *grpc.Server, srv AutokeyServer) {
+ s.RegisterService(&_Autokey_serviceDesc, srv)
+}
+
+func _Autokey_CreateKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateKeyHandleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyServer).CreateKeyHandle(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.Autokey/CreateKeyHandle",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyServer).CreateKeyHandle(ctx, req.(*CreateKeyHandleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Autokey_GetKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetKeyHandleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyServer).GetKeyHandle(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.Autokey/GetKeyHandle",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyServer).GetKeyHandle(ctx, req.(*GetKeyHandleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Autokey_ListKeyHandles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListKeyHandlesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyServer).ListKeyHandles(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.Autokey/ListKeyHandles",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyServer).ListKeyHandles(ctx, req.(*ListKeyHandlesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Autokey_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.cloud.kms.v1.Autokey",
+ HandlerType: (*AutokeyServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateKeyHandle",
+ Handler: _Autokey_CreateKeyHandle_Handler,
+ },
+ {
+ MethodName: "GetKeyHandle",
+ Handler: _Autokey_GetKeyHandle_Handler,
+ },
+ {
+ MethodName: "ListKeyHandles",
+ Handler: _Autokey_ListKeyHandles_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/cloud/kms/v1/autokey.proto",
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go
new file mode 100644
index 000000000..bb1abb0af
--- /dev/null
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go
@@ -0,0 +1,743 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/cloud/kms/v1/autokey_admin.proto
+
+package kmspb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The states AutokeyConfig can be in.
+type AutokeyConfig_State int32
+
+const (
+ // The state of the AutokeyConfig is unspecified.
+ AutokeyConfig_STATE_UNSPECIFIED AutokeyConfig_State = 0
+ // The AutokeyConfig is currently active.
+ AutokeyConfig_ACTIVE AutokeyConfig_State = 1
+ // A previously configured key project has been deleted and the current
+ // AutokeyConfig is unusable.
+ AutokeyConfig_KEY_PROJECT_DELETED AutokeyConfig_State = 2
+ // The AutokeyConfig is not yet initialized or has been reset to its default
+ // uninitialized state.
+ AutokeyConfig_UNINITIALIZED AutokeyConfig_State = 3
+)
+
+// Enum value maps for AutokeyConfig_State.
+var (
+ AutokeyConfig_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "ACTIVE",
+ 2: "KEY_PROJECT_DELETED",
+ 3: "UNINITIALIZED",
+ }
+ AutokeyConfig_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "ACTIVE": 1,
+ "KEY_PROJECT_DELETED": 2,
+ "UNINITIALIZED": 3,
+ }
+)
+
+func (x AutokeyConfig_State) Enum() *AutokeyConfig_State {
+ p := new(AutokeyConfig_State)
+ *p = x
+ return p
+}
+
+func (x AutokeyConfig_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AutokeyConfig_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0].Descriptor()
+}
+
+func (AutokeyConfig_State) Type() protoreflect.EnumType {
+ return &file_google_cloud_kms_v1_autokey_admin_proto_enumTypes[0]
+}
+
+func (x AutokeyConfig_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AutokeyConfig_State.Descriptor instead.
+func (AutokeyConfig_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// Request message for
+// [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig].
+type UpdateAutokeyConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] with values to
+ // update.
+ AutokeyConfig *AutokeyConfig `protobuf:"bytes,1,opt,name=autokey_config,json=autokeyConfig,proto3" json:"autokey_config,omitempty"`
+ // Required. Masks which fields of the
+ // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] to update, e.g.
+ // `keyProject`.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateAutokeyConfigRequest) Reset() {
+ *x = UpdateAutokeyConfigRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateAutokeyConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateAutokeyConfigRequest) ProtoMessage() {}
+
+func (x *UpdateAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateAutokeyConfigRequest.ProtoReflect.Descriptor instead.
+func (*UpdateAutokeyConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UpdateAutokeyConfigRequest) GetAutokeyConfig() *AutokeyConfig {
+ if x != nil {
+ return x.AutokeyConfig
+ }
+ return nil
+}
+
+func (x *UpdateAutokeyConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// Request message for
+// [GetAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig].
+type GetAutokeyConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig]
+ // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetAutokeyConfigRequest) Reset() {
+ *x = GetAutokeyConfigRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetAutokeyConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAutokeyConfigRequest) ProtoMessage() {}
+
+func (x *GetAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAutokeyConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetAutokeyConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetAutokeyConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// Cloud KMS Autokey configuration for a folder.
+type AutokeyConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig]
+ // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or
+ // `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey] when a
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] is created. On
+ // [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig],
+ // the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on
+ // this key project. Once configured, for Cloud KMS Autokey to function
+ // properly, this key project must have the Cloud KMS API activated and the
+ // Cloud KMS Service Agent for this key project must be granted the
+ // `cloudkms.admin` role (or pertinent permissions). A request with an empty
+ // key project field will clear the configuration.
+ KeyProject string `protobuf:"bytes,2,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"`
+ // Output only. The state for the AutokeyConfig.
+ State AutokeyConfig_State `protobuf:"varint,4,opt,name=state,proto3,enum=google.cloud.kms.v1.AutokeyConfig_State" json:"state,omitempty"`
+}
+
+func (x *AutokeyConfig) Reset() {
+ *x = AutokeyConfig{}
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AutokeyConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AutokeyConfig) ProtoMessage() {}
+
+func (x *AutokeyConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AutokeyConfig.ProtoReflect.Descriptor instead.
+func (*AutokeyConfig) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AutokeyConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AutokeyConfig) GetKeyProject() string {
+ if x != nil {
+ return x.KeyProject
+ }
+ return ""
+}
+
+func (x *AutokeyConfig) GetState() AutokeyConfig_State {
+ if x != nil {
+ return x.State
+ }
+ return AutokeyConfig_STATE_UNSPECIFIED
+}
+
+// Request message for
+// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig].
+type ShowEffectiveAutokeyConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the resource project to the show effective Cloud KMS
+ // Autokey configuration for. This may be helpful for interrogating the effect
+ // of nested folder configurations on a given resource project.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+}
+
+func (x *ShowEffectiveAutokeyConfigRequest) Reset() {
+ *x = ShowEffectiveAutokeyConfigRequest{}
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ShowEffectiveAutokeyConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ShowEffectiveAutokeyConfigRequest) ProtoMessage() {}
+
+func (x *ShowEffectiveAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ShowEffectiveAutokeyConfigRequest.ProtoReflect.Descriptor instead.
+func (*ShowEffectiveAutokeyConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ShowEffectiveAutokeyConfigRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+// Response message for
+// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig].
+type ShowEffectiveAutokeyConfigResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the key project configured in the resource project's folder
+ // ancestry.
+ KeyProject string `protobuf:"bytes,1,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"`
+}
+
+func (x *ShowEffectiveAutokeyConfigResponse) Reset() {
+ *x = ShowEffectiveAutokeyConfigResponse{}
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ShowEffectiveAutokeyConfigResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ShowEffectiveAutokeyConfigResponse) ProtoMessage() {}
+
+func (x *ShowEffectiveAutokeyConfigResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ShowEffectiveAutokeyConfigResponse.ProtoReflect.Descriptor instead.
+func (*ShowEffectiveAutokeyConfigResponse) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ShowEffectiveAutokeyConfigResponse) GetKeyProject() string {
+ if x != nil {
+ return x.KeyProject
+ }
+ return ""
+}
+
+var File_google_cloud_kms_v1_autokey_admin_proto protoreflect.FileDescriptor
+
+var file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b,
+ 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
+ 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75,
+ 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d,
+ 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b,
+ 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41,
+ 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x22, 0xd6, 0x02, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a,
+ 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x43, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x56, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49,
+ 0x56, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x4a,
+ 0x45, 0x43, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x11, 0x0a,
+ 0x0d, 0x55, 0x4e, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x45, 0x44, 0x10, 0x03,
+ 0x3a, 0x69, 0xea, 0x41, 0x66, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41,
+ 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x66, 0x6f,
+ 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61,
+ 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a, 0x0e, 0x61, 0x75,
+ 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x32, 0x0d, 0x61, 0x75,
+ 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x70, 0x0a, 0x21, 0x53,
+ 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f,
+ 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x45, 0x0a,
+ 0x22, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75,
+ 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x32, 0xc8, 0x05, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
+ 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xd2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65,
+ 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x66, 0xda, 0x41, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x61, 0x75,
+ 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f,
+ 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x97, 0x01, 0x0a, 0x10, 0x47,
+ 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24,
+ 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x7d, 0x12, 0xd2, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66,
+ 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41,
+ 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73,
+ 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f,
+ 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c,
+ 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42,
+ 0x59, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x41, 0x75, 0x74, 0x6f,
+ 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b,
+ 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce sync.Once
+ file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = file_google_cloud_kms_v1_autokey_admin_proto_rawDesc
+)
+
+func file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP() []byte {
+ file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce.Do(func() {
+ file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_admin_proto_rawDescData)
+ })
+ return file_google_cloud_kms_v1_autokey_admin_proto_rawDescData
+}
+
+var file_google_cloud_kms_v1_autokey_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_cloud_kms_v1_autokey_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_cloud_kms_v1_autokey_admin_proto_goTypes = []any{
+ (AutokeyConfig_State)(0), // 0: google.cloud.kms.v1.AutokeyConfig.State
+ (*UpdateAutokeyConfigRequest)(nil), // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest
+ (*GetAutokeyConfigRequest)(nil), // 2: google.cloud.kms.v1.GetAutokeyConfigRequest
+ (*AutokeyConfig)(nil), // 3: google.cloud.kms.v1.AutokeyConfig
+ (*ShowEffectiveAutokeyConfigRequest)(nil), // 4: google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest
+ (*ShowEffectiveAutokeyConfigResponse)(nil), // 5: google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse
+ (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask
+}
+var file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = []int32{
+ 3, // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest.autokey_config:type_name -> google.cloud.kms.v1.AutokeyConfig
+ 6, // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 0, // 2: google.cloud.kms.v1.AutokeyConfig.state:type_name -> google.cloud.kms.v1.AutokeyConfig.State
+ 1, // 3: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:input_type -> google.cloud.kms.v1.UpdateAutokeyConfigRequest
+ 2, // 4: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:input_type -> google.cloud.kms.v1.GetAutokeyConfigRequest
+ 4, // 5: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:input_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest
+ 3, // 6: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig
+ 3, // 7: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig
+ 5, // 8: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:output_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse
+ 6, // [6:9] is the sub-list for method output_type
+ 3, // [3:6] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_google_cloud_kms_v1_autokey_admin_proto_init() }
+func file_google_cloud_kms_v1_autokey_admin_proto_init() {
+ if File_google_cloud_kms_v1_autokey_admin_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_cloud_kms_v1_autokey_admin_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_cloud_kms_v1_autokey_admin_proto_goTypes,
+ DependencyIndexes: file_google_cloud_kms_v1_autokey_admin_proto_depIdxs,
+ EnumInfos: file_google_cloud_kms_v1_autokey_admin_proto_enumTypes,
+ MessageInfos: file_google_cloud_kms_v1_autokey_admin_proto_msgTypes,
+ }.Build()
+ File_google_cloud_kms_v1_autokey_admin_proto = out.File
+ file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = nil
+ file_google_cloud_kms_v1_autokey_admin_proto_goTypes = nil
+ file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// AutokeyAdminClient is the client API for AutokeyAdmin service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AutokeyAdminClient interface {
+ // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
+ // folder. The caller must have both `cloudkms.autokeyConfigs.update`
+ // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy`
+ // permission on the provided key project. A
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's
+ // descendant projects will use this configuration to determine where to
+ // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey].
+ UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error)
+ // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
+ // folder.
+ GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error)
+ // Returns the effective Cloud KMS Autokey configuration for a given project.
+ ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error)
+}
+
+type autokeyAdminClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewAutokeyAdminClient(cc grpc.ClientConnInterface) AutokeyAdminClient {
+ return &autokeyAdminClient{cc}
+}
+
+func (c *autokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) {
+ out := new(AutokeyConfig)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *autokeyAdminClient) GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) {
+ out := new(AutokeyConfig)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *autokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error) {
+ out := new(ShowEffectiveAutokeyConfigResponse)
+ err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AutokeyAdminServer is the server API for AutokeyAdmin service.
+type AutokeyAdminServer interface {
+ // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
+ // folder. The caller must have both `cloudkms.autokeyConfigs.update`
+ // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy`
+ // permission on the provided key project. A
+ // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's
+ // descendant projects will use this configuration to determine where to
+ // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey].
+ UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error)
+ // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
+ // folder.
+ GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error)
+ // Returns the effective Cloud KMS Autokey configuration for a given project.
+ ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error)
+}
+
+// UnimplementedAutokeyAdminServer can be embedded to have forward compatible implementations.
+type UnimplementedAutokeyAdminServer struct {
+}
+
+func (*UnimplementedAutokeyAdminServer) UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateAutokeyConfig not implemented")
+}
+func (*UnimplementedAutokeyAdminServer) GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAutokeyConfig not implemented")
+}
+func (*UnimplementedAutokeyAdminServer) ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ShowEffectiveAutokeyConfig not implemented")
+}
+
+func RegisterAutokeyAdminServer(s *grpc.Server, srv AutokeyAdminServer) {
+ s.RegisterService(&_AutokeyAdmin_serviceDesc, srv)
+}
+
+func _AutokeyAdmin_UpdateAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateAutokeyConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, req.(*UpdateAutokeyConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AutokeyAdmin_GetAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAutokeyConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, req.(*GetAutokeyConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ShowEffectiveAutokeyConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, req.(*ShowEffectiveAutokeyConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _AutokeyAdmin_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.cloud.kms.v1.AutokeyAdmin",
+ HandlerType: (*AutokeyAdminServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "UpdateAutokeyConfig",
+ Handler: _AutokeyAdmin_UpdateAutokeyConfig_Handler,
+ },
+ {
+ MethodName: "GetAutokeyConfig",
+ Handler: _AutokeyAdmin_GetAutokeyConfig_Handler,
+ },
+ {
+ MethodName: "ShowEffectiveAutokeyConfig",
+ Handler: _AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/cloud/kms/v1/autokey_admin.proto",
+}
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
index fdc98c084..d0739cca5 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/cloud/kms/v1/ekm_service.proto
package kmspb
@@ -57,26 +57,30 @@ const (
// * When creating a
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] associated with
// this
- // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must
- // supply the key path of pre-existing external key material that will be
- // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
- // * Destruction of external key material cannot be requested via the
- // Cloud KMS API and must be performed directly in the EKM.
- // * Automatic rotation of key material is not supported.
+ //
+ // [EkmConnection][google.cloud.kms.v1.EkmConnection], the caller must
+ // supply the key path of pre-existing external key material that will be
+ // linked to the [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
+ // - Destruction of external key material cannot be requested via the
+ // Cloud KMS API and must be performed directly in the EKM.
+ // - Automatic rotation of key material is not supported.
EkmConnection_MANUAL EkmConnection_KeyManagementMode = 1
// All [CryptoKeys][google.cloud.kms.v1.CryptoKey] created with this
// [EkmConnection][google.cloud.kms.v1.EkmConnection] use EKM-side key
// management operations initiated from Cloud KMS. This means that:
+ //
// * When a [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
// associated with this [EkmConnection][google.cloud.kms.v1.EkmConnection]
// is
- // created, the EKM automatically generates new key material and a new
- // key path. The caller cannot supply the key path of pre-existing
- // external key material.
- // * Destruction of external key material associated with this
- // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by
- // calling [DestroyCryptoKeyVersion][EkmService.DestroyCryptoKeyVersion].
- // * Automatic rotation of key material is supported.
+ //
+ // created, the EKM automatically generates new key material and a new
+ // key path. The caller cannot supply the key path of pre-existing
+ // external key material.
+ // - Destruction of external key material associated with this
+ // [EkmConnection][google.cloud.kms.v1.EkmConnection] can be requested by
+ // calling
+ // [DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion].
+ // - Automatic rotation of key material is supported.
EkmConnection_CLOUD_KMS EkmConnection_KeyManagementMode = 2
)
@@ -157,11 +161,9 @@ type ListEkmConnectionsRequest struct {
func (x *ListEkmConnectionsRequest) Reset() {
*x = ListEkmConnectionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListEkmConnectionsRequest) String() string {
@@ -172,7 +174,7 @@ func (*ListEkmConnectionsRequest) ProtoMessage() {}
func (x *ListEkmConnectionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -242,11 +244,9 @@ type ListEkmConnectionsResponse struct {
func (x *ListEkmConnectionsResponse) Reset() {
*x = ListEkmConnectionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListEkmConnectionsResponse) String() string {
@@ -257,7 +257,7 @@ func (*ListEkmConnectionsResponse) ProtoMessage() {}
func (x *ListEkmConnectionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -307,11 +307,9 @@ type GetEkmConnectionRequest struct {
func (x *GetEkmConnectionRequest) Reset() {
*x = GetEkmConnectionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetEkmConnectionRequest) String() string {
@@ -322,7 +320,7 @@ func (*GetEkmConnectionRequest) ProtoMessage() {}
func (x *GetEkmConnectionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -365,11 +363,9 @@ type CreateEkmConnectionRequest struct {
func (x *CreateEkmConnectionRequest) Reset() {
*x = CreateEkmConnectionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateEkmConnectionRequest) String() string {
@@ -380,7 +376,7 @@ func (*CreateEkmConnectionRequest) ProtoMessage() {}
func (x *CreateEkmConnectionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -432,11 +428,9 @@ type UpdateEkmConnectionRequest struct {
func (x *UpdateEkmConnectionRequest) Reset() {
*x = UpdateEkmConnectionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateEkmConnectionRequest) String() string {
@@ -447,7 +441,7 @@ func (*UpdateEkmConnectionRequest) ProtoMessage() {}
func (x *UpdateEkmConnectionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -490,11 +484,9 @@ type GetEkmConfigRequest struct {
func (x *GetEkmConfigRequest) Reset() {
*x = GetEkmConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetEkmConfigRequest) String() string {
@@ -505,7 +497,7 @@ func (*GetEkmConfigRequest) ProtoMessage() {}
func (x *GetEkmConfigRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -542,11 +534,9 @@ type UpdateEkmConfigRequest struct {
func (x *UpdateEkmConfigRequest) Reset() {
*x = UpdateEkmConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateEkmConfigRequest) String() string {
@@ -557,7 +547,7 @@ func (*UpdateEkmConfigRequest) ProtoMessage() {}
func (x *UpdateEkmConfigRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -622,11 +612,9 @@ type Certificate struct {
func (x *Certificate) Reset() {
*x = Certificate{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Certificate) String() string {
@@ -637,7 +625,7 @@ func (*Certificate) ProtoMessage() {}
func (x *Certificate) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -720,7 +708,7 @@ func (x *Certificate) GetSha256Fingerprint() string {
// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and
// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a
// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of
-// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], as well as
+// [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], as well as
// performing cryptographic operations using keys created within the
// [EkmConnection][google.cloud.kms.v1.EkmConnection].
type EkmConnection struct {
@@ -735,7 +723,7 @@ type EkmConnection struct {
// Output only. The time at which the
// [EkmConnection][google.cloud.kms.v1.EkmConnection] was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // A list of
+ // Optional. A list of
// [ServiceResolvers][google.cloud.kms.v1.EkmConnection.ServiceResolver] where
// the EKM can be reached. There should be one ServiceResolver per EKM
// replica. Currently, only a single
@@ -759,11 +747,9 @@ type EkmConnection struct {
func (x *EkmConnection) Reset() {
*x = EkmConnection{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EkmConnection) String() string {
@@ -774,7 +760,7 @@ func (*EkmConnection) ProtoMessage() {}
func (x *EkmConnection) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -836,7 +822,7 @@ func (x *EkmConnection) GetCryptoSpacePath() string {
// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and
// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] with a
// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of
-// [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC] in a given
+// [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC] in a given
// project and location.
type EkmConfig struct {
state protoimpl.MessageState
@@ -855,11 +841,9 @@ type EkmConfig struct {
func (x *EkmConfig) Reset() {
*x = EkmConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EkmConfig) String() string {
@@ -870,7 +854,7 @@ func (*EkmConfig) ProtoMessage() {}
func (x *EkmConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -913,11 +897,9 @@ type VerifyConnectivityRequest struct {
func (x *VerifyConnectivityRequest) Reset() {
*x = VerifyConnectivityRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *VerifyConnectivityRequest) String() string {
@@ -928,7 +910,7 @@ func (*VerifyConnectivityRequest) ProtoMessage() {}
func (x *VerifyConnectivityRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -960,11 +942,9 @@ type VerifyConnectivityResponse struct {
func (x *VerifyConnectivityResponse) Reset() {
*x = VerifyConnectivityResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *VerifyConnectivityResponse) String() string {
@@ -975,7 +955,7 @@ func (*VerifyConnectivityResponse) ProtoMessage() {}
func (x *VerifyConnectivityResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1019,11 +999,9 @@ type EkmConnection_ServiceResolver struct {
func (x *EkmConnection_ServiceResolver) Reset() {
*x = EkmConnection_ServiceResolver{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EkmConnection_ServiceResolver) String() string {
@@ -1034,7 +1012,7 @@ func (*EkmConnection_ServiceResolver) ProtoMessage() {}
func (x *EkmConnection_ServiceResolver) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1195,195 +1173,195 @@ var file_google_cloud_kms_v1_ekm_service_proto_rawDesc = []byte{
0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e,
0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x73, 0x68,
0x61, 0x32, 0x35, 0x36, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22,
- 0xf2, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0xf7, 0x06, 0x0a, 0x0d, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72,
0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5f, 0x0a, 0x11,
+ 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x64, 0x0a, 0x11,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72,
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b,
0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x10, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x17, 0x0a,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
- 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61,
- 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11,
- 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64,
- 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x73, 0x70, 0x61, 0x63,
- 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61,
- 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
- 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41,
- 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
- 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f,
- 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x11, 0x4b, 0x65,
- 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12,
- 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x4d, 0x45, 0x4e,
- 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e, 0x55, 0x41, 0x4c, 0x10, 0x01,
- 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b, 0x4d, 0x53, 0x10, 0x02, 0x3a,
- 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b,
- 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x16, 0x64,
- 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x01,
+ 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65,
+ 0x72, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x69, 0x0a, 0x13, 0x6b,
+ 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f,
+ 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45,
+ 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4b, 0x65, 0x79,
+ 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x2f, 0x0a, 0x11, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x53, 0x70,
+ 0x61, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x1a, 0xa5, 0x02, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2f,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x68,
+ 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22,
+ 0x53, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
+ 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x1f, 0x4b, 0x45, 0x59, 0x5f, 0x4d, 0x41, 0x4e, 0x41,
+ 0x47, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+ 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x4e,
+ 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x5f, 0x4b,
+ 0x4d, 0x53, 0x10, 0x02, 0x3a, 0x73, 0xea, 0x41, 0x70, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x47, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xe4, 0x01, 0x0a, 0x09, 0x45, 0x6b,
+ 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x63, 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6b, 0x6d, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x2d, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x22, 0x5e, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02,
0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d,
- 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x3a, 0x59, 0xea, 0x41, 0x56, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45,
- 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5e, 0x0a, 0x19, 0x56,
- 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56,
- 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74,
- 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc, 0x0b, 0x0a, 0x0a, 0x45, 0x6b,
- 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73,
- 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xdc,
+ 0x0b, 0x0a, 0x0a, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xba, 0x01,
+ 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45,
+ 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45,
+ 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47,
+ 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34,
+ 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45,
+ 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x74, 0xda, 0x41, 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65,
+ 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x44, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02,
- 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa7, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d,
- 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45,
- 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x41, 0xda, 0x41,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b,
- 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0xe0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45,
- 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x74, 0xda, 0x41,
- 0x27, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f,
- 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0e,
- 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x32,
- 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a,
+ 0x0c, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b,
+ 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x7d, 0x12, 0xc3, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b,
+ 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x22, 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65,
+ 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65,
+ 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79,
+ 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x54, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47,
+ 0x12, 0x45, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2f, 0x2a, 0x7d, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0xe2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d,
- 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0x76, 0xda, 0x41, 0x1a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x0e, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x94, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x45,
- 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47,
- 0x65, 0x74, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
- 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xc3,
- 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45,
- 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22,
- 0x63, 0xda, 0x41, 0x16, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44,
- 0x3a, 0x0a, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x36, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
- 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
- 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0xda, 0x41,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x12, 0x45, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x65, 0x6b,
- 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a,
- 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69,
- 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41,
- 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x85, 0x02, 0xea, 0x41, 0x7c, 0x0a, 0x27,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79,
+ 0x2f, 0x2a, 0x2f, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
+ 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
- 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f,
- 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70,
- 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x82, 0x02,
+ 0xea, 0x41, 0x7c, 0x0a, 0x27, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x51, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
+ 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x0a,
+ 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0f, 0x45, 0x6b, 0x6d, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62,
+ 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1400,7 +1378,7 @@ func file_google_cloud_kms_v1_ekm_service_proto_rawDescGZIP() []byte {
var file_google_cloud_kms_v1_ekm_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_cloud_kms_v1_ekm_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
-var file_google_cloud_kms_v1_ekm_service_proto_goTypes = []interface{}{
+var file_google_cloud_kms_v1_ekm_service_proto_goTypes = []any{
(EkmConnection_KeyManagementMode)(0), // 0: google.cloud.kms.v1.EkmConnection.KeyManagementMode
(*ListEkmConnectionsRequest)(nil), // 1: google.cloud.kms.v1.ListEkmConnectionsRequest
(*ListEkmConnectionsResponse)(nil), // 2: google.cloud.kms.v1.ListEkmConnectionsResponse
@@ -1457,164 +1435,6 @@ func file_google_cloud_kms_v1_ekm_service_proto_init() {
if File_google_cloud_kms_v1_ekm_service_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListEkmConnectionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListEkmConnectionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetEkmConnectionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateEkmConnectionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateEkmConnectionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetEkmConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateEkmConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Certificate); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EkmConnection); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EkmConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VerifyConnectivityRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*VerifyConnectivityResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_ekm_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EkmConnection_ServiceResolver); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
index e4b300d9d..f39cad8bc 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/cloud/kms/v1/resources.proto
package kmspb
@@ -102,6 +102,123 @@ func (ProtectionLevel) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{0}
}
+// Describes the reason for a data access. Please refer to
+// https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes
+// for the detailed semantic meaning of justification reason codes.
+type AccessReason int32
+
+const (
+ // Unspecified access reason.
+ AccessReason_REASON_UNSPECIFIED AccessReason = 0
+ // Customer-initiated support.
+ AccessReason_CUSTOMER_INITIATED_SUPPORT AccessReason = 1
+ // Google-initiated access for system management and troubleshooting.
+ AccessReason_GOOGLE_INITIATED_SERVICE AccessReason = 2
+ // Google-initiated access in response to a legal request or legal process.
+ AccessReason_THIRD_PARTY_DATA_REQUEST AccessReason = 3
+ // Google-initiated access for security, fraud, abuse, or compliance purposes.
+ AccessReason_GOOGLE_INITIATED_REVIEW AccessReason = 4
+ // Customer uses their account to perform any access to their own data which
+ // their IAM policy authorizes.
+ AccessReason_CUSTOMER_INITIATED_ACCESS AccessReason = 5
+ // Google systems access customer data to help optimize the structure of the
+ // data or quality for future uses by the customer.
+ AccessReason_GOOGLE_INITIATED_SYSTEM_OPERATION AccessReason = 6
+ // No reason is expected for this key request.
+ AccessReason_REASON_NOT_EXPECTED AccessReason = 7
+ // Customer uses their account to perform any access to their own data which
+ // their IAM policy authorizes, and one of the following is true:
+ //
+ // - A Google administrator has reset the root-access account associated with
+ // the user's organization within the past 7 days.
+ // - A Google-initiated emergency access operation has interacted with a
+ // resource in the same project or folder as the currently accessed resource
+ // within the past 7 days.
+ AccessReason_MODIFIED_CUSTOMER_INITIATED_ACCESS AccessReason = 8
+ // Google systems access customer data to help optimize the structure of the
+ // data or quality for future uses by the customer, and one of the following
+ // is true:
+ //
+ // - A Google administrator has reset the root-access account associated with
+ // the user's organization within the past 7 days.
+ // - A Google-initiated emergency access operation has interacted with a
+ // resource in the same project or folder as the currently accessed resource
+ // within the past 7 days.
+ AccessReason_MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION AccessReason = 9
+ // Google-initiated access to maintain system reliability.
+ AccessReason_GOOGLE_RESPONSE_TO_PRODUCTION_ALERT AccessReason = 10
+ // One of the following operations is being executed while simultaneously
+ // encountering an internal technical issue which prevented a more precise
+ // justification code from being generated:
+ //
+ // - Your account has been used to perform any access to your own data which
+ // your IAM policy authorizes.
+ // - An automated Google system operates on encrypted customer data which your
+ // IAM policy authorizes.
+ // - Customer-initiated Google support access.
+ // - Google-initiated support access to protect system reliability.
+ AccessReason_CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING AccessReason = 11
+)
+
+// Enum value maps for AccessReason.
+var (
+ AccessReason_name = map[int32]string{
+ 0: "REASON_UNSPECIFIED",
+ 1: "CUSTOMER_INITIATED_SUPPORT",
+ 2: "GOOGLE_INITIATED_SERVICE",
+ 3: "THIRD_PARTY_DATA_REQUEST",
+ 4: "GOOGLE_INITIATED_REVIEW",
+ 5: "CUSTOMER_INITIATED_ACCESS",
+ 6: "GOOGLE_INITIATED_SYSTEM_OPERATION",
+ 7: "REASON_NOT_EXPECTED",
+ 8: "MODIFIED_CUSTOMER_INITIATED_ACCESS",
+ 9: "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION",
+ 10: "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT",
+ 11: "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING",
+ }
+ AccessReason_value = map[string]int32{
+ "REASON_UNSPECIFIED": 0,
+ "CUSTOMER_INITIATED_SUPPORT": 1,
+ "GOOGLE_INITIATED_SERVICE": 2,
+ "THIRD_PARTY_DATA_REQUEST": 3,
+ "GOOGLE_INITIATED_REVIEW": 4,
+ "CUSTOMER_INITIATED_ACCESS": 5,
+ "GOOGLE_INITIATED_SYSTEM_OPERATION": 6,
+ "REASON_NOT_EXPECTED": 7,
+ "MODIFIED_CUSTOMER_INITIATED_ACCESS": 8,
+ "MODIFIED_GOOGLE_INITIATED_SYSTEM_OPERATION": 9,
+ "GOOGLE_RESPONSE_TO_PRODUCTION_ALERT": 10,
+ "CUSTOMER_AUTHORIZED_WORKFLOW_SERVICING": 11,
+ }
+)
+
+func (x AccessReason) Enum() *AccessReason {
+ p := new(AccessReason)
+ *p = x
+ return p
+}
+
+func (x AccessReason) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AccessReason) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[1].Descriptor()
+}
+
+func (AccessReason) Type() protoreflect.EnumType {
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[1]
+}
+
+func (x AccessReason) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AccessReason.Descriptor instead.
+func (AccessReason) EnumDescriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{1}
+}
+
// [CryptoKeyPurpose][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose]
// describes the cryptographic capabilities of a
// [CryptoKey][google.cloud.kms.v1.CryptoKey]. A given key can only be used
@@ -170,11 +287,11 @@ func (x CryptoKey_CryptoKeyPurpose) String() string {
}
func (CryptoKey_CryptoKeyPurpose) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[1].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[2].Descriptor()
}
func (CryptoKey_CryptoKeyPurpose) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[1]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[2]
}
func (x CryptoKey_CryptoKeyPurpose) Number() protoreflect.EnumNumber {
@@ -228,11 +345,11 @@ func (x KeyOperationAttestation_AttestationFormat) String() string {
}
func (KeyOperationAttestation_AttestationFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[2].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[3].Descriptor()
}
func (KeyOperationAttestation_AttestationFormat) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[2]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[3]
}
func (x KeyOperationAttestation_AttestationFormat) Number() protoreflect.EnumNumber {
@@ -357,6 +474,8 @@ const (
// Other hash functions can also be used:
// https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms
CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 31
+ // EdDSA on the Curve25519 in pure mode (taking data as input).
+ CryptoKeyVersion_EC_SIGN_ED25519 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 40
// HMAC-SHA256 signing with a 256 bit key.
CryptoKeyVersion_HMAC_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 32
// HMAC-SHA1 signing with a 160 bit key.
@@ -369,6 +488,12 @@ const (
CryptoKeyVersion_HMAC_SHA224 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 36
// Algorithm representing symmetric encryption by an external key manager.
CryptoKeyVersion_EXTERNAL_SYMMETRIC_ENCRYPTION CryptoKeyVersion_CryptoKeyVersionAlgorithm = 18
+ // The post-quantum Module-Lattice-Based Digital Signature Algorithm, at
+ // security level 3. Randomized version.
+ CryptoKeyVersion_PQ_SIGN_ML_DSA_65 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 56
+ // The post-quantum stateless hash-based digital signature algorithm, at
+ // security level 1. Randomized version.
+ CryptoKeyVersion_PQ_SIGN_SLH_DSA_SHA2_128S CryptoKeyVersion_CryptoKeyVersionAlgorithm = 57
)
// Enum value maps for CryptoKeyVersion_CryptoKeyVersionAlgorithm.
@@ -403,12 +528,15 @@ var (
12: "EC_SIGN_P256_SHA256",
13: "EC_SIGN_P384_SHA384",
31: "EC_SIGN_SECP256K1_SHA256",
+ 40: "EC_SIGN_ED25519",
32: "HMAC_SHA256",
33: "HMAC_SHA1",
34: "HMAC_SHA384",
35: "HMAC_SHA512",
36: "HMAC_SHA224",
18: "EXTERNAL_SYMMETRIC_ENCRYPTION",
+ 56: "PQ_SIGN_ML_DSA_65",
+ 57: "PQ_SIGN_SLH_DSA_SHA2_128S",
}
CryptoKeyVersion_CryptoKeyVersionAlgorithm_value = map[string]int32{
"CRYPTO_KEY_VERSION_ALGORITHM_UNSPECIFIED": 0,
@@ -440,12 +568,15 @@ var (
"EC_SIGN_P256_SHA256": 12,
"EC_SIGN_P384_SHA384": 13,
"EC_SIGN_SECP256K1_SHA256": 31,
+ "EC_SIGN_ED25519": 40,
"HMAC_SHA256": 32,
"HMAC_SHA1": 33,
"HMAC_SHA384": 34,
"HMAC_SHA512": 35,
"HMAC_SHA224": 36,
"EXTERNAL_SYMMETRIC_ENCRYPTION": 18,
+ "PQ_SIGN_ML_DSA_65": 56,
+ "PQ_SIGN_SLH_DSA_SHA2_128S": 57,
}
)
@@ -460,11 +591,11 @@ func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) String() string {
}
func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[3].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[4].Descriptor()
}
func (CryptoKeyVersion_CryptoKeyVersionAlgorithm) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[3]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[4]
}
func (x CryptoKeyVersion_CryptoKeyVersionAlgorithm) Number() protoreflect.EnumNumber {
@@ -579,11 +710,11 @@ func (x CryptoKeyVersion_CryptoKeyVersionState) String() string {
}
func (CryptoKeyVersion_CryptoKeyVersionState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[4].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[5].Descriptor()
}
func (CryptoKeyVersion_CryptoKeyVersionState) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[4]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[5]
}
func (x CryptoKeyVersion_CryptoKeyVersionState) Number() protoreflect.EnumNumber {
@@ -638,11 +769,11 @@ func (x CryptoKeyVersion_CryptoKeyVersionView) String() string {
}
func (CryptoKeyVersion_CryptoKeyVersionView) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[5].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[6].Descriptor()
}
func (CryptoKeyVersion_CryptoKeyVersionView) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[5]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[6]
}
func (x CryptoKeyVersion_CryptoKeyVersionView) Number() protoreflect.EnumNumber {
@@ -654,6 +785,74 @@ func (CryptoKeyVersion_CryptoKeyVersionView) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{4, 2}
}
+// The supported [PublicKey][google.cloud.kms.v1.PublicKey] formats.
+type PublicKey_PublicKeyFormat int32
+
+const (
+ // If the
+ // [public_key_format][google.cloud.kms.v1.GetPublicKeyRequest.public_key_format]
+ // field is not specified:
+ // - For PQC algorithms, an error will be returned.
+ // - For non-PQC algorithms, the default format is PEM, and the field
+ // [pem][google.cloud.kms.v1.PublicKey.pem] will be populated.
+ //
+ // Otherwise, the public key will be exported through the
+ // [public_key][google.cloud.kms.v1.PublicKey.public_key] field in the
+ // requested format.
+ PublicKey_PUBLIC_KEY_FORMAT_UNSPECIFIED PublicKey_PublicKeyFormat = 0
+ // The returned public key will be encoded in PEM format.
+ // See the [RFC7468](https://tools.ietf.org/html/rfc7468) sections for
+ // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2)
+ // and [Textual Encoding of Subject Public Key Info]
+ // (https://tools.ietf.org/html/rfc7468#section-13) for more information.
+ PublicKey_PEM PublicKey_PublicKeyFormat = 1
+ // This is supported only for PQC algorithms.
+ // The key material is returned in the format defined by NIST PQC
+ // standards (FIPS 203, FIPS 204, and FIPS 205).
+ PublicKey_NIST_PQC PublicKey_PublicKeyFormat = 3
+)
+
+// Enum value maps for PublicKey_PublicKeyFormat.
+var (
+ PublicKey_PublicKeyFormat_name = map[int32]string{
+ 0: "PUBLIC_KEY_FORMAT_UNSPECIFIED",
+ 1: "PEM",
+ 3: "NIST_PQC",
+ }
+ PublicKey_PublicKeyFormat_value = map[string]int32{
+ "PUBLIC_KEY_FORMAT_UNSPECIFIED": 0,
+ "PEM": 1,
+ "NIST_PQC": 3,
+ }
+)
+
+func (x PublicKey_PublicKeyFormat) Enum() *PublicKey_PublicKeyFormat {
+ p := new(PublicKey_PublicKeyFormat)
+ *p = x
+ return p
+}
+
+func (x PublicKey_PublicKeyFormat) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PublicKey_PublicKeyFormat) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[7].Descriptor()
+}
+
+func (PublicKey_PublicKeyFormat) Type() protoreflect.EnumType {
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[7]
+}
+
+func (x PublicKey_PublicKeyFormat) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PublicKey_PublicKeyFormat.Descriptor instead.
+func (PublicKey_PublicKeyFormat) EnumDescriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0}
+}
+
// [ImportMethod][google.cloud.kms.v1.ImportJob.ImportMethod] describes the
// key wrapping method chosen for this
// [ImportJob][google.cloud.kms.v1.ImportJob].
@@ -735,11 +934,11 @@ func (x ImportJob_ImportMethod) String() string {
}
func (ImportJob_ImportMethod) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[6].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[8].Descriptor()
}
func (ImportJob_ImportMethod) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[6]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[8]
}
func (x ImportJob_ImportMethod) Number() protoreflect.EnumNumber {
@@ -748,7 +947,7 @@ func (x ImportJob_ImportMethod) Number() protoreflect.EnumNumber {
// Deprecated: Use ImportJob_ImportMethod.Descriptor instead.
func (ImportJob_ImportMethod) EnumDescriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7, 0}
}
// The state of the [ImportJob][google.cloud.kms.v1.ImportJob], indicating if
@@ -800,11 +999,11 @@ func (x ImportJob_ImportJobState) String() string {
}
func (ImportJob_ImportJobState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_cloud_kms_v1_resources_proto_enumTypes[7].Descriptor()
+ return file_google_cloud_kms_v1_resources_proto_enumTypes[9].Descriptor()
}
func (ImportJob_ImportJobState) Type() protoreflect.EnumType {
- return &file_google_cloud_kms_v1_resources_proto_enumTypes[7]
+ return &file_google_cloud_kms_v1_resources_proto_enumTypes[9]
}
func (x ImportJob_ImportJobState) Number() protoreflect.EnumNumber {
@@ -813,7 +1012,7 @@ func (x ImportJob_ImportJobState) Number() protoreflect.EnumNumber {
// Deprecated: Use ImportJob_ImportJobState.Descriptor instead.
func (ImportJob_ImportJobState) EnumDescriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 1}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7, 1}
}
// A [KeyRing][google.cloud.kms.v1.KeyRing] is a toplevel logical grouping of
@@ -834,11 +1033,9 @@ type KeyRing struct {
func (x *KeyRing) Reset() {
*x = KeyRing{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *KeyRing) String() string {
@@ -849,7 +1046,7 @@ func (*KeyRing) ProtoMessage() {}
func (x *KeyRing) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -933,6 +1130,7 @@ type CryptoKey struct {
// Controls the rate of automatic rotation.
//
// Types that are assignable to RotationSchedule:
+ //
// *CryptoKey_RotationPeriod
RotationSchedule isCryptoKey_RotationSchedule `protobuf_oneof:"rotation_schedule"`
// A template describing settings for new
@@ -951,7 +1149,7 @@ type CryptoKey struct {
// [DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]
// state before transitioning to
// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED].
- // If not specified at creation time, the default duration is 24 hours.
+ // If not specified at creation time, the default duration is 30 days.
DestroyScheduledDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=destroy_scheduled_duration,json=destroyScheduledDuration,proto3" json:"destroy_scheduled_duration,omitempty"`
// Immutable. The resource name of the backend environment where the key
// material for all [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion]
@@ -959,20 +1157,27 @@ type CryptoKey struct {
// where all related cryptographic operations are performed. Only applicable
// if [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] have a
// [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of
- // [EXTERNAL_VPC][CryptoKeyVersion.ProtectionLevel.EXTERNAL_VPC], with the
+ // [EXTERNAL_VPC][google.cloud.kms.v1.ProtectionLevel.EXTERNAL_VPC], with the
// resource name in the format `projects/*/locations/*/ekmConnections/*`.
// Note, this list is non-exhaustive and may apply to additional
// [ProtectionLevels][google.cloud.kms.v1.ProtectionLevel] in the future.
CryptoKeyBackend string `protobuf:"bytes,15,opt,name=crypto_key_backend,json=cryptoKeyBackend,proto3" json:"crypto_key_backend,omitempty"`
+ // Optional. The policy used for Key Access Justifications Policy Enforcement.
+ // If this field is present and this key is enrolled in Key Access
+ // Justifications Policy Enforcement, the policy will be evaluated in encrypt,
+ // decrypt, and sign operations, and the operation will fail if rejected by
+ // the policy. The policy is defined by specifying zero or more allowed
+ // justification codes.
+ // https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes
+ // By default, this field is absent, and all justification codes are allowed.
+ KeyAccessJustificationsPolicy *KeyAccessJustificationsPolicy `protobuf:"bytes,17,opt,name=key_access_justifications_policy,json=keyAccessJustificationsPolicy,proto3" json:"key_access_justifications_policy,omitempty"`
}
func (x *CryptoKey) Reset() {
*x = CryptoKey{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CryptoKey) String() string {
@@ -983,7 +1188,7 @@ func (*CryptoKey) ProtoMessage() {}
func (x *CryptoKey) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1082,6 +1287,13 @@ func (x *CryptoKey) GetCryptoKeyBackend() string {
return ""
}
+func (x *CryptoKey) GetKeyAccessJustificationsPolicy() *KeyAccessJustificationsPolicy {
+ if x != nil {
+ return x.KeyAccessJustificationsPolicy
+ }
+ return nil
+}
+
type isCryptoKey_RotationSchedule interface {
isCryptoKey_RotationSchedule()
}
@@ -1135,11 +1347,9 @@ type CryptoKeyVersionTemplate struct {
func (x *CryptoKeyVersionTemplate) Reset() {
*x = CryptoKeyVersionTemplate{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CryptoKeyVersionTemplate) String() string {
@@ -1150,7 +1360,7 @@ func (*CryptoKeyVersionTemplate) ProtoMessage() {}
func (x *CryptoKeyVersionTemplate) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1198,11 +1408,9 @@ type KeyOperationAttestation struct {
func (x *KeyOperationAttestation) Reset() {
*x = KeyOperationAttestation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *KeyOperationAttestation) String() string {
@@ -1213,7 +1421,7 @@ func (*KeyOperationAttestation) ProtoMessage() {}
func (x *KeyOperationAttestation) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1342,11 +1550,9 @@ type CryptoKeyVersion struct {
func (x *CryptoKeyVersion) Reset() {
*x = CryptoKeyVersion{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CryptoKeyVersion) String() string {
@@ -1357,7 +1563,7 @@ func (*CryptoKeyVersion) ProtoMessage() {}
func (x *CryptoKeyVersion) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1484,7 +1690,76 @@ func (x *CryptoKeyVersion) GetReimportEligible() bool {
return false
}
-// The public key for a given
+// Data with integrity verification field.
+type ChecksummedData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Raw Data.
+ Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+ // Integrity verification field. A CRC32C
+ // checksum of the returned
+ // [ChecksummedData.data][google.cloud.kms.v1.ChecksummedData.data]. An
+ // integrity check of
+ // [ChecksummedData.data][google.cloud.kms.v1.ChecksummedData.data] can be
+ // performed by computing the CRC32C checksum of
+ // [ChecksummedData.data][google.cloud.kms.v1.ChecksummedData.data] and
+ // comparing your results to this field. Discard the response in case of
+ // non-matching checksum values, and perform a limited number of retries. A
+ // persistent mismatch may indicate an issue in your computation of the CRC32C
+ // checksum. Note: This field is defined as int64 for reasons of compatibility
+ // across different languages. However, it is a non-negative integer, which
+ // will never exceed `2^32-1`, and can be safely downconverted to uint32 in
+ // languages that support this type.
+ Crc32CChecksum *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=crc32c_checksum,json=crc32cChecksum,proto3" json:"crc32c_checksum,omitempty"`
+}
+
+func (x *ChecksummedData) Reset() {
+ *x = ChecksummedData{}
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ChecksummedData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ChecksummedData) ProtoMessage() {}
+
+func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead.
+func (*ChecksummedData) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ChecksummedData) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+func (x *ChecksummedData) GetCrc32CChecksum() *wrapperspb.Int64Value {
+ if x != nil {
+ return x.Crc32CChecksum
+ }
+ return nil
+}
+
+// The public keys for a given
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. Obtained via
// [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
type PublicKey struct {
@@ -1512,8 +1787,8 @@ type PublicKey struct {
// mismatch may indicate an issue in your computation of the CRC32C checksum.
// Note: This field is defined as int64 for reasons of compatibility across
// different languages. However, it is a non-negative integer, which will
- // never exceed 2^32-1, and can be safely downconverted to uint32 in languages
- // that support this type.
+ // never exceed `2^32-1`, and can be safely downconverted to uint32 in
+ // languages that support this type.
//
// NOTE: This field is in Beta.
PemCrc32C *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=pem_crc32c,json=pemCrc32c,proto3" json:"pem_crc32c,omitempty"`
@@ -1526,15 +1801,22 @@ type PublicKey struct {
// The [ProtectionLevel][google.cloud.kms.v1.ProtectionLevel] of the
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key.
ProtectionLevel ProtectionLevel `protobuf:"varint,5,opt,name=protection_level,json=protectionLevel,proto3,enum=google.cloud.kms.v1.ProtectionLevel" json:"protection_level,omitempty"`
+ // The [PublicKey][google.cloud.kms.v1.PublicKey] format specified by the
+ // customer through the
+ // [public_key_format][google.cloud.kms.v1.GetPublicKeyRequest.public_key_format]
+ // field.
+ PublicKeyFormat PublicKey_PublicKeyFormat `protobuf:"varint,7,opt,name=public_key_format,json=publicKeyFormat,proto3,enum=google.cloud.kms.v1.PublicKey_PublicKeyFormat" json:"public_key_format,omitempty"`
+ // This field contains the public key (with integrity verification), formatted
+ // according to the
+ // [public_key_format][google.cloud.kms.v1.PublicKey.public_key_format] field.
+ PublicKey *ChecksummedData `protobuf:"bytes,8,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
}
func (x *PublicKey) Reset() {
*x = PublicKey{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PublicKey) String() string {
@@ -1544,8 +1826,8 @@ func (x *PublicKey) String() string {
func (*PublicKey) ProtoMessage() {}
func (x *PublicKey) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1557,7 +1839,7 @@ func (x *PublicKey) ProtoReflect() protoreflect.Message {
// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
func (*PublicKey) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{5}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6}
}
func (x *PublicKey) GetPem() string {
@@ -1595,6 +1877,20 @@ func (x *PublicKey) GetProtectionLevel() ProtectionLevel {
return ProtectionLevel_PROTECTION_LEVEL_UNSPECIFIED
}
+func (x *PublicKey) GetPublicKeyFormat() PublicKey_PublicKeyFormat {
+ if x != nil {
+ return x.PublicKeyFormat
+ }
+ return PublicKey_PUBLIC_KEY_FORMAT_UNSPECIFIED
+}
+
+func (x *PublicKey) GetPublicKey() *ChecksummedData {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+
// An [ImportJob][google.cloud.kms.v1.ImportJob] can be used to create
// [CryptoKeys][google.cloud.kms.v1.CryptoKey] and
// [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion] using pre-existing
@@ -1679,11 +1975,9 @@ type ImportJob struct {
func (x *ImportJob) Reset() {
*x = ImportJob{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ImportJob) String() string {
@@ -1693,8 +1987,8 @@ func (x *ImportJob) String() string {
func (*ImportJob) ProtoMessage() {}
func (x *ImportJob) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1706,7 +2000,7 @@ func (x *ImportJob) ProtoReflect() protoreflect.Message {
// Deprecated: Use ImportJob.ProtoReflect.Descriptor instead.
func (*ImportJob) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7}
}
func (x *ImportJob) GetName() string {
@@ -1802,11 +2096,9 @@ type ExternalProtectionLevelOptions struct {
func (x *ExternalProtectionLevelOptions) Reset() {
*x = ExternalProtectionLevelOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExternalProtectionLevelOptions) String() string {
@@ -1816,8 +2108,8 @@ func (x *ExternalProtectionLevelOptions) String() string {
func (*ExternalProtectionLevelOptions) ProtoMessage() {}
func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[8]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1829,7 +2121,7 @@ func (x *ExternalProtectionLevelOptions) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExternalProtectionLevelOptions.ProtoReflect.Descriptor instead.
func (*ExternalProtectionLevelOptions) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{8}
}
func (x *ExternalProtectionLevelOptions) GetExternalKeyUri() string {
@@ -1846,6 +2138,61 @@ func (x *ExternalProtectionLevelOptions) GetEkmConnectionKeyPath() string {
return ""
}
+// A
+// [KeyAccessJustificationsPolicy][google.cloud.kms.v1.KeyAccessJustificationsPolicy]
+// specifies zero or more allowed
+// [AccessReason][google.cloud.kms.v1.AccessReason] values for encrypt, decrypt,
+// and sign operations on a [CryptoKey][google.cloud.kms.v1.CryptoKey].
+type KeyAccessJustificationsPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of allowed reasons for access to a
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey]. Zero allowed access reasons
+ // means all encrypt, decrypt, and sign operations for the
+ // [CryptoKey][google.cloud.kms.v1.CryptoKey] associated with this policy will
+ // fail.
+ AllowedAccessReasons []AccessReason `protobuf:"varint,1,rep,packed,name=allowed_access_reasons,json=allowedAccessReasons,proto3,enum=google.cloud.kms.v1.AccessReason" json:"allowed_access_reasons,omitempty"`
+}
+
+func (x *KeyAccessJustificationsPolicy) Reset() {
+ *x = KeyAccessJustificationsPolicy{}
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *KeyAccessJustificationsPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyAccessJustificationsPolicy) ProtoMessage() {}
+
+func (x *KeyAccessJustificationsPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyAccessJustificationsPolicy.ProtoReflect.Descriptor instead.
+func (*KeyAccessJustificationsPolicy) Descriptor() ([]byte, []int) {
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *KeyAccessJustificationsPolicy) GetAllowedAccessReasons() []AccessReason {
+ if x != nil {
+ return x.AllowedAccessReasons
+ }
+ return nil
+}
+
// Certificate chains needed to verify the attestation.
// Certificates in chains are PEM-encoded and are ordered based on
// https://tools.ietf.org/html/rfc5246#section-7.4.2.
@@ -1864,11 +2211,9 @@ type KeyOperationAttestation_CertificateChains struct {
func (x *KeyOperationAttestation_CertificateChains) Reset() {
*x = KeyOperationAttestation_CertificateChains{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *KeyOperationAttestation_CertificateChains) String() string {
@@ -1878,8 +2223,8 @@ func (x *KeyOperationAttestation_CertificateChains) String() string {
func (*KeyOperationAttestation_CertificateChains) ProtoMessage() {}
func (x *KeyOperationAttestation_CertificateChains) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[11]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1933,11 +2278,9 @@ type ImportJob_WrappingPublicKey struct {
func (x *ImportJob_WrappingPublicKey) Reset() {
*x = ImportJob_WrappingPublicKey{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ImportJob_WrappingPublicKey) String() string {
@@ -1947,8 +2290,8 @@ func (x *ImportJob_WrappingPublicKey) String() string {
func (*ImportJob_WrappingPublicKey) ProtoMessage() {}
func (x *ImportJob_WrappingPublicKey) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_cloud_kms_v1_resources_proto_msgTypes[12]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1960,7 +2303,7 @@ func (x *ImportJob_WrappingPublicKey) ProtoReflect() protoreflect.Message {
// Deprecated: Use ImportJob_WrappingPublicKey.ProtoReflect.Descriptor instead.
func (*ImportJob_WrappingPublicKey) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{6, 0}
+ return file_google_cloud_kms_v1_resources_proto_rawDescGZIP(), []int{7, 0}
}
func (x *ImportJob_WrappingPublicKey) GetPem() string {
@@ -1999,7 +2342,7 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65,
0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67,
- 0x7d, 0x22, 0xd1, 0x08, 0x0a, 0x09, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12,
+ 0x7d, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12,
0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d,
0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
@@ -2045,235 +2388,298 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79,
0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09,
0xe0, 0x41, 0x05, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x1e, 0x43,
- 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x55, 0x52, 0x50, 0x4f, 0x53,
- 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59,
- 0x50, 0x54, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52,
- 0x49, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x53, 0x59,
- 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10,
- 0x06, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x41, 0x57, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54,
- 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x07, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41,
- 0x43, 0x10, 0x09, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
- 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b,
- 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
- 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d,
- 0x42, 0x13, 0x0a, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61,
- 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76,
- 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
- 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c,
- 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x22, 0x83, 0x04, 0x0a, 0x17, 0x4b, 0x65, 0x79, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
- 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12,
- 0x64, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x73, 0x1a, 0x98, 0x01, 0x0a, 0x11, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63,
- 0x61, 0x76, 0x69, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x0b, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x2a,
- 0x0a, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x65,
- 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x43, 0x61, 0x72, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
- 0x65, 0x72, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x65, 0x72, 0x74, 0x73,
- 0x22, 0x6b, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46,
- 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x1e, 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x41,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50,
- 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56,
- 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45,
- 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x32,
- 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x04, 0x22, 0xf4, 0x14,
- 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x80, 0x01, 0x0a, 0x20,
+ 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6a, 0x75, 0x73, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x1d, 0x6b, 0x65, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x39,
+ 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x75, 0x72, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x22,
+ 0x0a, 0x1e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x50, 0x55, 0x52,
+ 0x50, 0x4f, 0x53, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45,
+ 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x53, 0x59, 0x4d, 0x4d,
+ 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12,
+ 0x41, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59,
+ 0x50, 0x54, 0x10, 0x06, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x41, 0x57, 0x5f, 0x45, 0x4e, 0x43, 0x52,
+ 0x59, 0x50, 0x54, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x07, 0x12, 0x07, 0x0a,
+ 0x03, 0x4d, 0x41, 0x43, 0x10, 0x09, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
+ 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b,
+ 0x65, 0x79, 0x7d, 0x42, 0x13, 0x0a, 0x11, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d,
+ 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x22, 0x83, 0x04, 0x0a, 0x17, 0x4b,
+ 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79,
+ 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x12, 0x64, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x65,
+ 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x1a, 0x98, 0x01, 0x0a, 0x11, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x21,
+ 0x0a, 0x0c, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, 0x76, 0x69, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74,
+ 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x72, 0x64,
+ 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x61, 0x72, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x34, 0x0a,
+ 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x65,
+ 0x72, 0x74, 0x73, 0x22, 0x6b, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x22, 0x0a, 0x1e, 0x41, 0x54, 0x54, 0x45,
+ 0x53, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14,
+ 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45,
+ 0x53, 0x53, 0x45, 0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d,
+ 0x5f, 0x56, 0x32, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x04,
+ 0x22, 0xbf, 0x15, 0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x12, 0x54, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72,
+ 0x69, 0x74, 0x68, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54,
- 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76,
- 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50,
- 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x65, 0x76, 0x65, 0x6c, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41,
- 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x61,
- 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65,
- 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a,
- 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12,
- 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x64, 0x65,
- 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x12, 0x64, 0x65, 0x73,
- 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x61,
+ 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f,
- 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x40, 0x0a, 0x0b,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37,
- 0x0a, 0x15, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
- 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x13, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
- 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x19, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65,
- 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x17, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75,
- 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x23, 0x65, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
- 0x14, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x20, 0x65, 0x78, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46,
- 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x21,
- 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x1e, 0x65, 0x78,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x11,
- 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c,
- 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x22, 0xcd,
- 0x07, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x2c, 0x0a, 0x28,
- 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49,
- 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x47, 0x4f,
- 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45,
- 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x41,
- 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x29, 0x12, 0x0f, 0x0a, 0x0b,
- 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x13, 0x12, 0x0f, 0x0a,
- 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x42, 0x43, 0x10, 0x2a, 0x12, 0x0f,
- 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43, 0x42, 0x43, 0x10, 0x2b, 0x12,
- 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x54, 0x52, 0x10, 0x2c,
- 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43, 0x54, 0x52, 0x10,
- 0x2d, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53,
- 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12,
- 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f,
- 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x1c, 0x0a,
- 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30,
- 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c, 0x0a, 0x18, 0x52,
- 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34, 0x30, 0x39, 0x36,
- 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37, 0x32,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36,
- 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74,
+ 0x72, 0x6f, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x12,
+ 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x72,
+ 0x6f, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x69,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12,
+ 0x40, 0x0a, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c,
+ 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69,
+ 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x19, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
+ 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x17, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61,
+ 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x23, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x20, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12,
+ 0x7e, 0x0a, 0x21, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x1e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x30, 0x0a, 0x11, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67,
+ 0x69, 0x62, 0x6c, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x10, 0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c,
+ 0x65, 0x22, 0x98, 0x08, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12,
+ 0x2c, 0x0a, 0x28, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45,
+ 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a,
+ 0x1b, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49,
+ 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x29, 0x12,
+ 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x10, 0x13,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x42, 0x43, 0x10,
+ 0x2a, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43, 0x42, 0x43,
+ 0x10, 0x2b, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x43, 0x54,
+ 0x52, 0x10, 0x2c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x43,
+ 0x54, 0x52, 0x10, 0x2d, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e,
+ 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
+ 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50,
+ 0x53, 0x53, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03,
+ 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53,
+ 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x1c,
+ 0x0a, 0x18, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x34,
+ 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x0f, 0x12, 0x1e, 0x0a, 0x1a,
+ 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x32,
+ 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a,
+ 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33,
+ 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x12, 0x1e, 0x0a, 0x1a,
+ 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34,
+ 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a,
+ 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34,
+ 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x10, 0x12, 0x1b, 0x0a, 0x17,
+ 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43,
+ 0x53, 0x31, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41,
0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f,
- 0x32, 0x30, 0x34, 0x38, 0x10, 0x1c, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49,
- 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x33, 0x30, 0x37,
- 0x32, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
- 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x10, 0x1e,
- 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f,
- 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
- 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50,
- 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32,
- 0x35, 0x36, 0x10, 0x09, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52,
- 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48,
- 0x41, 0x32, 0x35, 0x36, 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45,
- 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f,
- 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f,
- 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34,
- 0x38, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f,
- 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37,
- 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f,
- 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39,
- 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53,
- 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10,
- 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x33, 0x38,
- 0x34, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x43,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f,
- 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41,
- 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43,
- 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41,
- 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d,
- 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45,
- 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49,
- 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, 0x9b,
- 0x02, 0x0a, 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50,
- 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53,
- 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
- 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45,
- 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e,
- 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42,
- 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59,
- 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f,
- 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50,
- 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12,
- 0x11, 0x0a, 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
- 0x10, 0x07, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x45, 0x4e,
- 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45,
- 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, 0x45,
- 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0a, 0x22, 0x49, 0x0a, 0x14,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b,
- 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f,
- 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a,
- 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x33, 0x30, 0x37, 0x32, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x53, 0x41, 0x5f, 0x53, 0x49,
+ 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x57, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x34, 0x30, 0x39,
+ 0x36, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59,
+ 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41,
+ 0x32, 0x35, 0x36, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43,
+ 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53,
+ 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x09, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x44,
+ 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36,
+ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41,
+ 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30,
+ 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x11, 0x12, 0x1e, 0x0a, 0x1a, 0x52,
+ 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f,
+ 0x32, 0x30, 0x34, 0x38, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x25, 0x12, 0x1e, 0x0a, 0x1a, 0x52,
+ 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f,
+ 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x26, 0x12, 0x1e, 0x0a, 0x1a, 0x52,
+ 0x53, 0x41, 0x5f, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f,
+ 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x27, 0x12, 0x17, 0x0a, 0x13, 0x45,
+ 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x32, 0x35, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32,
+ 0x35, 0x36, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
+ 0x50, 0x33, 0x38, 0x34, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a,
+ 0x18, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36,
+ 0x4b, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x13, 0x0a, 0x0f, 0x45,
+ 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x28,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10,
+ 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10,
+ 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32,
+ 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32,
+ 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f,
+ 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x51, 0x5f, 0x53, 0x49, 0x47,
+ 0x4e, 0x5f, 0x4d, 0x4c, 0x5f, 0x44, 0x53, 0x41, 0x5f, 0x36, 0x35, 0x10, 0x38, 0x12, 0x1d, 0x0a,
+ 0x19, 0x50, 0x51, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x4c, 0x48, 0x5f, 0x44, 0x53, 0x41,
+ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x5f, 0x31, 0x32, 0x38, 0x53, 0x10, 0x39, 0x22, 0x9b, 0x02, 0x0a,
+ 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f,
+ 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41,
+ 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45,
+ 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42,
+ 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45,
+ 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44,
+ 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f, 0x53, 0x43,
+ 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x45, 0x4e,
+ 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a,
+ 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07,
+ 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46,
+ 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x45, 0x4e, 0x44, 0x49,
+ 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54,
+ 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x58, 0x54,
+ 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0a, 0x22, 0x49, 0x0a, 0x14, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69,
+ 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59,
+ 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46,
+ 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f,
+ 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69,
+ 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x7d, 0x22, 0x6b, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x0e, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22,
+ 0xbc, 0x05, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12,
+ 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a,
+ 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f,
+ 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f,
+ 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
+ 0x5a, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69,
+ 0x63, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0f, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x43, 0x0a, 0x0a, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
+ 0x22, 0x4b, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x21, 0x0a, 0x1d, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x5f, 0x4b, 0x45,
+ 0x59, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x45, 0x4d, 0x10, 0x01, 0x12,
+ 0x0c, 0x0a, 0x08, 0x4e, 0x49, 0x53, 0x54, 0x5f, 0x50, 0x51, 0x43, 0x10, 0x03, 0x3a, 0xae, 0x01,
+ 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75,
+ 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f,
@@ -2281,137 +2687,143 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72,
0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41,
- 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69,
- 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4,
+ 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70,
+ 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41,
+ 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
+ 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42,
+ 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65,
+ 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f,
+ 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12,
+ 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x84,
- 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
- 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f,
- 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0d,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f,
- 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x65,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, 0x78, 0x70,
- 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f,
- 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
- 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74,
- 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b,
- 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, 0x11, 0x57,
- 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
- 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70,
- 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74,
- 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x45,
- 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
- 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33,
- 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36,
- 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34,
- 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36,
- 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33,
- 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32,
- 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50,
- 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53,
- 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41,
- 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05,
- 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39,
- 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, 0x49, 0x6d,
- 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x1c,
- 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16,
- 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41,
- 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45,
- 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x3a,
- 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6d,
- 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
- 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72,
- 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f,
- 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, 0x01, 0x0a,
- 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
- 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x6b, 0x6d,
- 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
- 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, 0x6d, 0x43,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x74, 0x68,
- 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, 0x49, 0x4f,
- 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
- 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, 0x41, 0x52,
- 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08,
- 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58,
- 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x42, 0x88, 0x01, 0x0a,
- 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73,
- 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56,
- 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72,
+ 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12,
+ 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, 0x11, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67,
+ 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c,
+ 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19,
+ 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52,
+ 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41,
+ 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52,
+ 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41,
+ 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52,
+ 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41,
+ 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a,
+ 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53,
+ 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12,
+ 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32,
+ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41,
+ 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35,
+ 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f,
+ 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49,
+ 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12,
+ 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45,
+ 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62,
+ 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
+ 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d,
+ 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, 0x01, 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55,
+ 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x74, 0x68, 0x22, 0x78, 0x0a, 0x1d, 0x4b, 0x65, 0x79,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4a, 0x75, 0x73, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x57, 0x0a, 0x16, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x61,
+ 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x14, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x73, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54,
+ 0x57, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a,
+ 0x0c, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x2a,
+ 0xab, 0x03, 0x0a, 0x0c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x43, 0x55, 0x53, 0x54,
+ 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53,
+ 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x47, 0x4f, 0x4f, 0x47,
+ 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x45, 0x52,
+ 0x56, 0x49, 0x43, 0x45, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x48, 0x49, 0x52, 0x44, 0x5f,
+ 0x50, 0x41, 0x52, 0x54, 0x59, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45,
+ 0x53, 0x54, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49,
+ 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x56, 0x49, 0x45, 0x57, 0x10,
+ 0x04, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e,
+ 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x05,
+ 0x12, 0x25, 0x0a, 0x21, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49,
+ 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x5f, 0x4f, 0x50, 0x45, 0x52,
+ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x41, 0x53, 0x4f,
+ 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x58, 0x50, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x07,
+ 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53,
+ 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f,
+ 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x08, 0x12, 0x2e, 0x0a, 0x2a, 0x4d, 0x4f, 0x44, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54,
+ 0x49, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x5f, 0x4f, 0x50, 0x45,
+ 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x27, 0x0a, 0x23, 0x47, 0x4f, 0x4f, 0x47,
+ 0x4c, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50,
+ 0x52, 0x4f, 0x44, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x4c, 0x45, 0x52, 0x54, 0x10,
+ 0x0a, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x41, 0x55,
+ 0x54, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x45, 0x44, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f,
+ 0x57, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x0b, 0x42, 0x85, 0x01,
+ 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d,
+ 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca,
+ 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b,
+ 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2426,73 +2838,82 @@ func file_google_cloud_kms_v1_resources_proto_rawDescGZIP() []byte {
return file_google_cloud_kms_v1_resources_proto_rawDescData
}
-var file_google_cloud_kms_v1_resources_proto_enumTypes = make([]protoimpl.EnumInfo, 8)
-var file_google_cloud_kms_v1_resources_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
-var file_google_cloud_kms_v1_resources_proto_goTypes = []interface{}{
+var file_google_cloud_kms_v1_resources_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
+var file_google_cloud_kms_v1_resources_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_google_cloud_kms_v1_resources_proto_goTypes = []any{
(ProtectionLevel)(0), // 0: google.cloud.kms.v1.ProtectionLevel
- (CryptoKey_CryptoKeyPurpose)(0), // 1: google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose
- (KeyOperationAttestation_AttestationFormat)(0), // 2: google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat
- (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 3: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- (CryptoKeyVersion_CryptoKeyVersionState)(0), // 4: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState
- (CryptoKeyVersion_CryptoKeyVersionView)(0), // 5: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView
- (ImportJob_ImportMethod)(0), // 6: google.cloud.kms.v1.ImportJob.ImportMethod
- (ImportJob_ImportJobState)(0), // 7: google.cloud.kms.v1.ImportJob.ImportJobState
- (*KeyRing)(nil), // 8: google.cloud.kms.v1.KeyRing
- (*CryptoKey)(nil), // 9: google.cloud.kms.v1.CryptoKey
- (*CryptoKeyVersionTemplate)(nil), // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate
- (*KeyOperationAttestation)(nil), // 11: google.cloud.kms.v1.KeyOperationAttestation
- (*CryptoKeyVersion)(nil), // 12: google.cloud.kms.v1.CryptoKeyVersion
- (*PublicKey)(nil), // 13: google.cloud.kms.v1.PublicKey
- (*ImportJob)(nil), // 14: google.cloud.kms.v1.ImportJob
- (*ExternalProtectionLevelOptions)(nil), // 15: google.cloud.kms.v1.ExternalProtectionLevelOptions
- nil, // 16: google.cloud.kms.v1.CryptoKey.LabelsEntry
- (*KeyOperationAttestation_CertificateChains)(nil), // 17: google.cloud.kms.v1.KeyOperationAttestation.CertificateChains
- (*ImportJob_WrappingPublicKey)(nil), // 18: google.cloud.kms.v1.ImportJob.WrappingPublicKey
- (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 20: google.protobuf.Duration
- (*wrapperspb.Int64Value)(nil), // 21: google.protobuf.Int64Value
+ (AccessReason)(0), // 1: google.cloud.kms.v1.AccessReason
+ (CryptoKey_CryptoKeyPurpose)(0), // 2: google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose
+ (KeyOperationAttestation_AttestationFormat)(0), // 3: google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat
+ (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 4: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ (CryptoKeyVersion_CryptoKeyVersionState)(0), // 5: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState
+ (CryptoKeyVersion_CryptoKeyVersionView)(0), // 6: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView
+ (PublicKey_PublicKeyFormat)(0), // 7: google.cloud.kms.v1.PublicKey.PublicKeyFormat
+ (ImportJob_ImportMethod)(0), // 8: google.cloud.kms.v1.ImportJob.ImportMethod
+ (ImportJob_ImportJobState)(0), // 9: google.cloud.kms.v1.ImportJob.ImportJobState
+ (*KeyRing)(nil), // 10: google.cloud.kms.v1.KeyRing
+ (*CryptoKey)(nil), // 11: google.cloud.kms.v1.CryptoKey
+ (*CryptoKeyVersionTemplate)(nil), // 12: google.cloud.kms.v1.CryptoKeyVersionTemplate
+ (*KeyOperationAttestation)(nil), // 13: google.cloud.kms.v1.KeyOperationAttestation
+ (*CryptoKeyVersion)(nil), // 14: google.cloud.kms.v1.CryptoKeyVersion
+ (*ChecksummedData)(nil), // 15: google.cloud.kms.v1.ChecksummedData
+ (*PublicKey)(nil), // 16: google.cloud.kms.v1.PublicKey
+ (*ImportJob)(nil), // 17: google.cloud.kms.v1.ImportJob
+ (*ExternalProtectionLevelOptions)(nil), // 18: google.cloud.kms.v1.ExternalProtectionLevelOptions
+ (*KeyAccessJustificationsPolicy)(nil), // 19: google.cloud.kms.v1.KeyAccessJustificationsPolicy
+ nil, // 20: google.cloud.kms.v1.CryptoKey.LabelsEntry
+ (*KeyOperationAttestation_CertificateChains)(nil), // 21: google.cloud.kms.v1.KeyOperationAttestation.CertificateChains
+ (*ImportJob_WrappingPublicKey)(nil), // 22: google.cloud.kms.v1.ImportJob.WrappingPublicKey
+ (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 24: google.protobuf.Duration
+ (*wrapperspb.Int64Value)(nil), // 25: google.protobuf.Int64Value
}
var file_google_cloud_kms_v1_resources_proto_depIdxs = []int32{
- 19, // 0: google.cloud.kms.v1.KeyRing.create_time:type_name -> google.protobuf.Timestamp
- 12, // 1: google.cloud.kms.v1.CryptoKey.primary:type_name -> google.cloud.kms.v1.CryptoKeyVersion
- 1, // 2: google.cloud.kms.v1.CryptoKey.purpose:type_name -> google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose
- 19, // 3: google.cloud.kms.v1.CryptoKey.create_time:type_name -> google.protobuf.Timestamp
- 19, // 4: google.cloud.kms.v1.CryptoKey.next_rotation_time:type_name -> google.protobuf.Timestamp
- 20, // 5: google.cloud.kms.v1.CryptoKey.rotation_period:type_name -> google.protobuf.Duration
- 10, // 6: google.cloud.kms.v1.CryptoKey.version_template:type_name -> google.cloud.kms.v1.CryptoKeyVersionTemplate
- 16, // 7: google.cloud.kms.v1.CryptoKey.labels:type_name -> google.cloud.kms.v1.CryptoKey.LabelsEntry
- 20, // 8: google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration:type_name -> google.protobuf.Duration
- 0, // 9: google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 3, // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- 2, // 11: google.cloud.kms.v1.KeyOperationAttestation.format:type_name -> google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat
- 17, // 12: google.cloud.kms.v1.KeyOperationAttestation.cert_chains:type_name -> google.cloud.kms.v1.KeyOperationAttestation.CertificateChains
- 4, // 13: google.cloud.kms.v1.CryptoKeyVersion.state:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState
- 0, // 14: google.cloud.kms.v1.CryptoKeyVersion.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 3, // 15: google.cloud.kms.v1.CryptoKeyVersion.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- 11, // 16: google.cloud.kms.v1.CryptoKeyVersion.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation
- 19, // 17: google.cloud.kms.v1.CryptoKeyVersion.create_time:type_name -> google.protobuf.Timestamp
- 19, // 18: google.cloud.kms.v1.CryptoKeyVersion.generate_time:type_name -> google.protobuf.Timestamp
- 19, // 19: google.cloud.kms.v1.CryptoKeyVersion.destroy_time:type_name -> google.protobuf.Timestamp
- 19, // 20: google.cloud.kms.v1.CryptoKeyVersion.destroy_event_time:type_name -> google.protobuf.Timestamp
- 19, // 21: google.cloud.kms.v1.CryptoKeyVersion.import_time:type_name -> google.protobuf.Timestamp
- 15, // 22: google.cloud.kms.v1.CryptoKeyVersion.external_protection_level_options:type_name -> google.cloud.kms.v1.ExternalProtectionLevelOptions
- 3, // 23: google.cloud.kms.v1.PublicKey.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- 21, // 24: google.cloud.kms.v1.PublicKey.pem_crc32c:type_name -> google.protobuf.Int64Value
- 0, // 25: google.cloud.kms.v1.PublicKey.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 6, // 26: google.cloud.kms.v1.ImportJob.import_method:type_name -> google.cloud.kms.v1.ImportJob.ImportMethod
- 0, // 27: google.cloud.kms.v1.ImportJob.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 19, // 28: google.cloud.kms.v1.ImportJob.create_time:type_name -> google.protobuf.Timestamp
- 19, // 29: google.cloud.kms.v1.ImportJob.generate_time:type_name -> google.protobuf.Timestamp
- 19, // 30: google.cloud.kms.v1.ImportJob.expire_time:type_name -> google.protobuf.Timestamp
- 19, // 31: google.cloud.kms.v1.ImportJob.expire_event_time:type_name -> google.protobuf.Timestamp
- 7, // 32: google.cloud.kms.v1.ImportJob.state:type_name -> google.cloud.kms.v1.ImportJob.ImportJobState
- 18, // 33: google.cloud.kms.v1.ImportJob.public_key:type_name -> google.cloud.kms.v1.ImportJob.WrappingPublicKey
- 11, // 34: google.cloud.kms.v1.ImportJob.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation
- 35, // [35:35] is the sub-list for method output_type
- 35, // [35:35] is the sub-list for method input_type
- 35, // [35:35] is the sub-list for extension type_name
- 35, // [35:35] is the sub-list for extension extendee
- 0, // [0:35] is the sub-list for field type_name
+ 23, // 0: google.cloud.kms.v1.KeyRing.create_time:type_name -> google.protobuf.Timestamp
+ 14, // 1: google.cloud.kms.v1.CryptoKey.primary:type_name -> google.cloud.kms.v1.CryptoKeyVersion
+ 2, // 2: google.cloud.kms.v1.CryptoKey.purpose:type_name -> google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose
+ 23, // 3: google.cloud.kms.v1.CryptoKey.create_time:type_name -> google.protobuf.Timestamp
+ 23, // 4: google.cloud.kms.v1.CryptoKey.next_rotation_time:type_name -> google.protobuf.Timestamp
+ 24, // 5: google.cloud.kms.v1.CryptoKey.rotation_period:type_name -> google.protobuf.Duration
+ 12, // 6: google.cloud.kms.v1.CryptoKey.version_template:type_name -> google.cloud.kms.v1.CryptoKeyVersionTemplate
+ 20, // 7: google.cloud.kms.v1.CryptoKey.labels:type_name -> google.cloud.kms.v1.CryptoKey.LabelsEntry
+ 24, // 8: google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration:type_name -> google.protobuf.Duration
+ 19, // 9: google.cloud.kms.v1.CryptoKey.key_access_justifications_policy:type_name -> google.cloud.kms.v1.KeyAccessJustificationsPolicy
+ 0, // 10: google.cloud.kms.v1.CryptoKeyVersionTemplate.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 4, // 11: google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ 3, // 12: google.cloud.kms.v1.KeyOperationAttestation.format:type_name -> google.cloud.kms.v1.KeyOperationAttestation.AttestationFormat
+ 21, // 13: google.cloud.kms.v1.KeyOperationAttestation.cert_chains:type_name -> google.cloud.kms.v1.KeyOperationAttestation.CertificateChains
+ 5, // 14: google.cloud.kms.v1.CryptoKeyVersion.state:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState
+ 0, // 15: google.cloud.kms.v1.CryptoKeyVersion.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 4, // 16: google.cloud.kms.v1.CryptoKeyVersion.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ 13, // 17: google.cloud.kms.v1.CryptoKeyVersion.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation
+ 23, // 18: google.cloud.kms.v1.CryptoKeyVersion.create_time:type_name -> google.protobuf.Timestamp
+ 23, // 19: google.cloud.kms.v1.CryptoKeyVersion.generate_time:type_name -> google.protobuf.Timestamp
+ 23, // 20: google.cloud.kms.v1.CryptoKeyVersion.destroy_time:type_name -> google.protobuf.Timestamp
+ 23, // 21: google.cloud.kms.v1.CryptoKeyVersion.destroy_event_time:type_name -> google.protobuf.Timestamp
+ 23, // 22: google.cloud.kms.v1.CryptoKeyVersion.import_time:type_name -> google.protobuf.Timestamp
+ 18, // 23: google.cloud.kms.v1.CryptoKeyVersion.external_protection_level_options:type_name -> google.cloud.kms.v1.ExternalProtectionLevelOptions
+ 25, // 24: google.cloud.kms.v1.ChecksummedData.crc32c_checksum:type_name -> google.protobuf.Int64Value
+ 4, // 25: google.cloud.kms.v1.PublicKey.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ 25, // 26: google.cloud.kms.v1.PublicKey.pem_crc32c:type_name -> google.protobuf.Int64Value
+ 0, // 27: google.cloud.kms.v1.PublicKey.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 7, // 28: google.cloud.kms.v1.PublicKey.public_key_format:type_name -> google.cloud.kms.v1.PublicKey.PublicKeyFormat
+ 15, // 29: google.cloud.kms.v1.PublicKey.public_key:type_name -> google.cloud.kms.v1.ChecksummedData
+ 8, // 30: google.cloud.kms.v1.ImportJob.import_method:type_name -> google.cloud.kms.v1.ImportJob.ImportMethod
+ 0, // 31: google.cloud.kms.v1.ImportJob.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 23, // 32: google.cloud.kms.v1.ImportJob.create_time:type_name -> google.protobuf.Timestamp
+ 23, // 33: google.cloud.kms.v1.ImportJob.generate_time:type_name -> google.protobuf.Timestamp
+ 23, // 34: google.cloud.kms.v1.ImportJob.expire_time:type_name -> google.protobuf.Timestamp
+ 23, // 35: google.cloud.kms.v1.ImportJob.expire_event_time:type_name -> google.protobuf.Timestamp
+ 9, // 36: google.cloud.kms.v1.ImportJob.state:type_name -> google.cloud.kms.v1.ImportJob.ImportJobState
+ 22, // 37: google.cloud.kms.v1.ImportJob.public_key:type_name -> google.cloud.kms.v1.ImportJob.WrappingPublicKey
+ 13, // 38: google.cloud.kms.v1.ImportJob.attestation:type_name -> google.cloud.kms.v1.KeyOperationAttestation
+ 1, // 39: google.cloud.kms.v1.KeyAccessJustificationsPolicy.allowed_access_reasons:type_name -> google.cloud.kms.v1.AccessReason
+ 40, // [40:40] is the sub-list for method output_type
+ 40, // [40:40] is the sub-list for method input_type
+ 40, // [40:40] is the sub-list for extension type_name
+ 40, // [40:40] is the sub-list for extension extendee
+ 0, // [0:40] is the sub-list for field type_name
}
func init() { file_google_cloud_kms_v1_resources_proto_init() }
@@ -2500,129 +2921,7 @@ func file_google_cloud_kms_v1_resources_proto_init() {
if File_google_cloud_kms_v1_resources_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_cloud_kms_v1_resources_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyRing); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CryptoKey); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CryptoKeyVersionTemplate); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyOperationAttestation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CryptoKeyVersion); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PublicKey); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportJob); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExternalProtectionLevelOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyOperationAttestation_CertificateChains); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportJob_WrappingPublicKey); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_google_cloud_kms_v1_resources_proto_msgTypes[1].OneofWrappers = []any{
(*CryptoKey_RotationPeriod)(nil),
}
type x struct{}
@@ -2630,8 +2929,8 @@ func file_google_cloud_kms_v1_resources_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_cloud_kms_v1_resources_proto_rawDesc,
- NumEnums: 8,
- NumMessages: 11,
+ NumEnums: 10,
+ NumMessages: 13,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
index 6ed2a1f89..fe2e40822 100644
--- a/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
+++ b/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/cloud/kms/v1/service.proto
package kmspb
@@ -78,11 +78,9 @@ type ListKeyRingsRequest struct {
func (x *ListKeyRingsRequest) Reset() {
*x = ListKeyRingsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListKeyRingsRequest) String() string {
@@ -93,7 +91,7 @@ func (*ListKeyRingsRequest) ProtoMessage() {}
func (x *ListKeyRingsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -180,11 +178,9 @@ type ListCryptoKeysRequest struct {
func (x *ListCryptoKeysRequest) Reset() {
*x = ListCryptoKeysRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListCryptoKeysRequest) String() string {
@@ -195,7 +191,7 @@ func (*ListCryptoKeysRequest) ProtoMessage() {}
func (x *ListCryptoKeysRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -290,11 +286,9 @@ type ListCryptoKeyVersionsRequest struct {
func (x *ListCryptoKeyVersionsRequest) Reset() {
*x = ListCryptoKeyVersionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListCryptoKeyVersionsRequest) String() string {
@@ -305,7 +299,7 @@ func (*ListCryptoKeyVersionsRequest) ProtoMessage() {}
func (x *ListCryptoKeyVersionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -397,11 +391,9 @@ type ListImportJobsRequest struct {
func (x *ListImportJobsRequest) Reset() {
*x = ListImportJobsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListImportJobsRequest) String() string {
@@ -412,7 +404,7 @@ func (*ListImportJobsRequest) ProtoMessage() {}
func (x *ListImportJobsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -482,11 +474,9 @@ type ListKeyRingsResponse struct {
func (x *ListKeyRingsResponse) Reset() {
*x = ListKeyRingsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListKeyRingsResponse) String() string {
@@ -497,7 +487,7 @@ func (*ListKeyRingsResponse) ProtoMessage() {}
func (x *ListKeyRingsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -553,11 +543,9 @@ type ListCryptoKeysResponse struct {
func (x *ListCryptoKeysResponse) Reset() {
*x = ListCryptoKeysResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListCryptoKeysResponse) String() string {
@@ -568,7 +556,7 @@ func (*ListCryptoKeysResponse) ProtoMessage() {}
func (x *ListCryptoKeysResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -625,11 +613,9 @@ type ListCryptoKeyVersionsResponse struct {
func (x *ListCryptoKeyVersionsResponse) Reset() {
*x = ListCryptoKeyVersionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListCryptoKeyVersionsResponse) String() string {
@@ -640,7 +626,7 @@ func (*ListCryptoKeyVersionsResponse) ProtoMessage() {}
func (x *ListCryptoKeyVersionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -696,11 +682,9 @@ type ListImportJobsResponse struct {
func (x *ListImportJobsResponse) Reset() {
*x = ListImportJobsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListImportJobsResponse) String() string {
@@ -711,7 +695,7 @@ func (*ListImportJobsResponse) ProtoMessage() {}
func (x *ListImportJobsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -761,11 +745,9 @@ type GetKeyRingRequest struct {
func (x *GetKeyRingRequest) Reset() {
*x = GetKeyRingRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetKeyRingRequest) String() string {
@@ -776,7 +758,7 @@ func (*GetKeyRingRequest) ProtoMessage() {}
func (x *GetKeyRingRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -812,11 +794,9 @@ type GetCryptoKeyRequest struct {
func (x *GetCryptoKeyRequest) Reset() {
*x = GetCryptoKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetCryptoKeyRequest) String() string {
@@ -827,7 +807,7 @@ func (*GetCryptoKeyRequest) ProtoMessage() {}
func (x *GetCryptoKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -863,11 +843,9 @@ type GetCryptoKeyVersionRequest struct {
func (x *GetCryptoKeyVersionRequest) Reset() {
*x = GetCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetCryptoKeyVersionRequest) String() string {
@@ -878,7 +856,7 @@ func (*GetCryptoKeyVersionRequest) ProtoMessage() {}
func (x *GetCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -910,15 +888,21 @@ type GetPublicKeyRequest struct {
// Required. The [name][google.cloud.kms.v1.CryptoKeyVersion.name] of the
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] public key to get.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. The [PublicKey][google.cloud.kms.v1.PublicKey] format specified
+ // by the user. This field is required for PQC algorithms. If specified, the
+ // public key will be exported through the
+ // [public_key][google.cloud.kms.v1.PublicKey.public_key] field in the
+ // requested format. Otherwise, the [pem][google.cloud.kms.v1.PublicKey.pem]
+ // field will be populated for non-PQC algorithms, and an error will be
+ // returned for PQC algorithms.
+ PublicKeyFormat PublicKey_PublicKeyFormat `protobuf:"varint,2,opt,name=public_key_format,json=publicKeyFormat,proto3,enum=google.cloud.kms.v1.PublicKey_PublicKeyFormat" json:"public_key_format,omitempty"`
}
func (x *GetPublicKeyRequest) Reset() {
*x = GetPublicKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetPublicKeyRequest) String() string {
@@ -929,7 +913,7 @@ func (*GetPublicKeyRequest) ProtoMessage() {}
func (x *GetPublicKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -951,6 +935,13 @@ func (x *GetPublicKeyRequest) GetName() string {
return ""
}
+func (x *GetPublicKeyRequest) GetPublicKeyFormat() PublicKey_PublicKeyFormat {
+ if x != nil {
+ return x.PublicKeyFormat
+ }
+ return PublicKey_PUBLIC_KEY_FORMAT_UNSPECIFIED
+}
+
// Request message for
// [KeyManagementService.GetImportJob][google.cloud.kms.v1.KeyManagementService.GetImportJob].
type GetImportJobRequest struct {
@@ -965,11 +956,9 @@ type GetImportJobRequest struct {
func (x *GetImportJobRequest) Reset() {
*x = GetImportJobRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetImportJobRequest) String() string {
@@ -980,7 +969,7 @@ func (*GetImportJobRequest) ProtoMessage() {}
func (x *GetImportJobRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1023,11 +1012,9 @@ type CreateKeyRingRequest struct {
func (x *CreateKeyRingRequest) Reset() {
*x = CreateKeyRingRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateKeyRingRequest) String() string {
@@ -1038,7 +1025,7 @@ func (*CreateKeyRingRequest) ProtoMessage() {}
func (x *CreateKeyRingRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1103,11 +1090,9 @@ type CreateCryptoKeyRequest struct {
func (x *CreateCryptoKeyRequest) Reset() {
*x = CreateCryptoKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateCryptoKeyRequest) String() string {
@@ -1118,7 +1103,7 @@ func (*CreateCryptoKeyRequest) ProtoMessage() {}
func (x *CreateCryptoKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1179,11 +1164,9 @@ type CreateCryptoKeyVersionRequest struct {
func (x *CreateCryptoKeyVersionRequest) Reset() {
*x = CreateCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateCryptoKeyVersionRequest) String() string {
@@ -1194,7 +1177,7 @@ func (*CreateCryptoKeyVersionRequest) ProtoMessage() {}
func (x *CreateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1248,7 +1231,9 @@ type ImportCryptoKeyVersionRequest struct {
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion], the
// [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] must be a child of
// [ImportCryptoKeyVersionRequest.parent][google.cloud.kms.v1.ImportCryptoKeyVersionRequest.parent],
- // have been previously created via [ImportCryptoKeyVersion][], and be in
+ // have been previously created via
+ // [ImportCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion],
+ // and be in
// [DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED]
// or
// [IMPORT_FAILED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.IMPORT_FAILED]
@@ -1285,14 +1270,16 @@ type ImportCryptoKeyVersionRequest struct {
//
// this field must contain the concatenation of:
//
- // - An ephemeral AES-256 wrapping key wrapped with the
- // [public_key][google.cloud.kms.v1.ImportJob.public_key] using
- // RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
- // label.
- //
- // - The formatted key to be imported, wrapped with the ephemeral AES-256
- // key using AES-KWP (RFC 5649).
- //
+ //
+ // - An ephemeral AES-256 wrapping key wrapped with the
+ // [public_key][google.cloud.kms.v1.ImportJob.public_key] using
+ // RSAES-OAEP with SHA-1/SHA-256, MGF1 with SHA-1/SHA-256, and an empty
+ // label.
+ //
+ // - The formatted key to be imported, wrapped with the ephemeral AES-256
+ // key using AES-KWP (RFC 5649).
+ //
+ //
//
//
// This format is the same as the format produced by PKCS#11 mechanism
@@ -1312,17 +1299,16 @@ type ImportCryptoKeyVersionRequest struct {
// instead.
//
// Types that are assignable to WrappedKeyMaterial:
+ //
// *ImportCryptoKeyVersionRequest_RsaAesWrappedKey
WrappedKeyMaterial isImportCryptoKeyVersionRequest_WrappedKeyMaterial `protobuf_oneof:"wrapped_key_material"`
}
func (x *ImportCryptoKeyVersionRequest) Reset() {
*x = ImportCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ImportCryptoKeyVersionRequest) String() string {
@@ -1333,7 +1319,7 @@ func (*ImportCryptoKeyVersionRequest) ProtoMessage() {}
func (x *ImportCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1433,11 +1419,9 @@ type CreateImportJobRequest struct {
func (x *CreateImportJobRequest) Reset() {
*x = CreateImportJobRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateImportJobRequest) String() string {
@@ -1448,7 +1432,7 @@ func (*CreateImportJobRequest) ProtoMessage() {}
func (x *CreateImportJobRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1499,11 +1483,9 @@ type UpdateCryptoKeyRequest struct {
func (x *UpdateCryptoKeyRequest) Reset() {
*x = UpdateCryptoKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateCryptoKeyRequest) String() string {
@@ -1514,7 +1496,7 @@ func (*UpdateCryptoKeyRequest) ProtoMessage() {}
func (x *UpdateCryptoKeyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1559,11 +1541,9 @@ type UpdateCryptoKeyVersionRequest struct {
func (x *UpdateCryptoKeyVersionRequest) Reset() {
*x = UpdateCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateCryptoKeyVersionRequest) String() string {
@@ -1574,7 +1554,7 @@ func (*UpdateCryptoKeyVersionRequest) ProtoMessage() {}
func (x *UpdateCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1620,11 +1600,9 @@ type UpdateCryptoKeyPrimaryVersionRequest struct {
func (x *UpdateCryptoKeyPrimaryVersionRequest) Reset() {
*x = UpdateCryptoKeyPrimaryVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateCryptoKeyPrimaryVersionRequest) String() string {
@@ -1635,7 +1613,7 @@ func (*UpdateCryptoKeyPrimaryVersionRequest) ProtoMessage() {}
func (x *UpdateCryptoKeyPrimaryVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1678,11 +1656,9 @@ type DestroyCryptoKeyVersionRequest struct {
func (x *DestroyCryptoKeyVersionRequest) Reset() {
*x = DestroyCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DestroyCryptoKeyVersionRequest) String() string {
@@ -1693,7 +1669,7 @@ func (*DestroyCryptoKeyVersionRequest) ProtoMessage() {}
func (x *DestroyCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1729,11 +1705,9 @@ type RestoreCryptoKeyVersionRequest struct {
func (x *RestoreCryptoKeyVersionRequest) Reset() {
*x = RestoreCryptoKeyVersionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreCryptoKeyVersionRequest) String() string {
@@ -1744,7 +1718,7 @@ func (*RestoreCryptoKeyVersionRequest) ProtoMessage() {}
func (x *RestoreCryptoKeyVersionRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1851,11 +1825,9 @@ type EncryptRequest struct {
func (x *EncryptRequest) Reset() {
*x = EncryptRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EncryptRequest) String() string {
@@ -1866,7 +1838,7 @@ func (*EncryptRequest) ProtoMessage() {}
func (x *EncryptRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1977,11 +1949,9 @@ type DecryptRequest struct {
func (x *DecryptRequest) Reset() {
*x = DecryptRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DecryptRequest) String() string {
@@ -1992,7 +1962,7 @@ func (*DecryptRequest) ProtoMessage() {}
func (x *DecryptRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2138,11 +2108,9 @@ type RawEncryptRequest struct {
func (x *RawEncryptRequest) Reset() {
*x = RawEncryptRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RawEncryptRequest) String() string {
@@ -2153,7 +2121,7 @@ func (*RawEncryptRequest) ProtoMessage() {}
func (x *RawEncryptRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2295,11 +2263,9 @@ type RawDecryptRequest struct {
func (x *RawDecryptRequest) Reset() {
*x = RawDecryptRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RawDecryptRequest) String() string {
@@ -2310,7 +2276,7 @@ func (*RawDecryptRequest) ProtoMessage() {}
func (x *RawDecryptRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2449,11 +2415,9 @@ type AsymmetricSignRequest struct {
func (x *AsymmetricSignRequest) Reset() {
*x = AsymmetricSignRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AsymmetricSignRequest) String() string {
@@ -2464,7 +2428,7 @@ func (*AsymmetricSignRequest) ProtoMessage() {}
func (x *AsymmetricSignRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2553,11 +2517,9 @@ type AsymmetricDecryptRequest struct {
func (x *AsymmetricDecryptRequest) Reset() {
*x = AsymmetricDecryptRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AsymmetricDecryptRequest) String() string {
@@ -2568,7 +2530,7 @@ func (*AsymmetricDecryptRequest) ProtoMessage() {}
func (x *AsymmetricDecryptRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2640,11 +2602,9 @@ type MacSignRequest struct {
func (x *MacSignRequest) Reset() {
*x = MacSignRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MacSignRequest) String() string {
@@ -2655,7 +2615,7 @@ func (*MacSignRequest) ProtoMessage() {}
func (x *MacSignRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2735,7 +2695,8 @@ type MacVerifyRequest struct {
// checksum. [KeyManagementService][google.cloud.kms.v1.KeyManagementService]
// will report an error if the checksum verification fails. If you receive a
// checksum error, your client should verify that
- // CRC32C([MacVerifyRequest.tag][]) is equal to
+ // CRC32C([MacVerifyRequest.mac][google.cloud.kms.v1.MacVerifyRequest.mac]) is
+ // equal to
// [MacVerifyRequest.mac_crc32c][google.cloud.kms.v1.MacVerifyRequest.mac_crc32c],
// and if so, perform a limited number of retries. A persistent mismatch may
// indicate an issue in your computation of the CRC32C checksum. Note: This
@@ -2748,11 +2709,9 @@ type MacVerifyRequest struct {
func (x *MacVerifyRequest) Reset() {
*x = MacVerifyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MacVerifyRequest) String() string {
@@ -2763,7 +2722,7 @@ func (*MacVerifyRequest) ProtoMessage() {}
func (x *MacVerifyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2835,11 +2794,9 @@ type GenerateRandomBytesRequest struct {
func (x *GenerateRandomBytesRequest) Reset() {
*x = GenerateRandomBytesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateRandomBytesRequest) String() string {
@@ -2850,7 +2807,7 @@ func (*GenerateRandomBytesRequest) ProtoMessage() {}
func (x *GenerateRandomBytesRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2952,11 +2909,9 @@ type EncryptResponse struct {
func (x *EncryptResponse) Reset() {
*x = EncryptResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EncryptResponse) String() string {
@@ -2967,7 +2922,7 @@ func (*EncryptResponse) ProtoMessage() {}
func (x *EncryptResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3062,11 +3017,9 @@ type DecryptResponse struct {
func (x *DecryptResponse) Reset() {
*x = DecryptResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DecryptResponse) String() string {
@@ -3077,7 +3030,7 @@ func (*DecryptResponse) ProtoMessage() {}
func (x *DecryptResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3216,11 +3169,9 @@ type RawEncryptResponse struct {
func (x *RawEncryptResponse) Reset() {
*x = RawEncryptResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RawEncryptResponse) String() string {
@@ -3231,7 +3182,7 @@ func (*RawEncryptResponse) ProtoMessage() {}
func (x *RawEncryptResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3391,11 +3342,9 @@ type RawDecryptResponse struct {
func (x *RawDecryptResponse) Reset() {
*x = RawDecryptResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RawDecryptResponse) String() string {
@@ -3406,7 +3355,7 @@ func (*RawDecryptResponse) ProtoMessage() {}
func (x *RawDecryptResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3527,11 +3476,9 @@ type AsymmetricSignResponse struct {
func (x *AsymmetricSignResponse) Reset() {
*x = AsymmetricSignResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AsymmetricSignResponse) String() string {
@@ -3542,7 +3489,7 @@ func (*AsymmetricSignResponse) ProtoMessage() {}
func (x *AsymmetricSignResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3645,11 +3592,9 @@ type AsymmetricDecryptResponse struct {
func (x *AsymmetricDecryptResponse) Reset() {
*x = AsymmetricDecryptResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AsymmetricDecryptResponse) String() string {
@@ -3660,7 +3605,7 @@ func (*AsymmetricDecryptResponse) ProtoMessage() {}
func (x *AsymmetricDecryptResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3752,11 +3697,9 @@ type MacSignResponse struct {
func (x *MacSignResponse) Reset() {
*x = MacSignResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MacSignResponse) String() string {
@@ -3767,7 +3710,7 @@ func (*MacSignResponse) ProtoMessage() {}
func (x *MacSignResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3877,11 +3820,9 @@ type MacVerifyResponse struct {
func (x *MacVerifyResponse) Reset() {
*x = MacVerifyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MacVerifyResponse) String() string {
@@ -3892,7 +3833,7 @@ func (*MacVerifyResponse) ProtoMessage() {}
func (x *MacVerifyResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3976,11 +3917,9 @@ type GenerateRandomBytesResponse struct {
func (x *GenerateRandomBytesResponse) Reset() {
*x = GenerateRandomBytesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GenerateRandomBytesResponse) String() string {
@@ -3991,7 +3930,7 @@ func (*GenerateRandomBytesResponse) ProtoMessage() {}
func (x *GenerateRandomBytesResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4029,6 +3968,7 @@ type Digest struct {
// Required. The message digest.
//
// Types that are assignable to Digest:
+ //
// *Digest_Sha256
// *Digest_Sha384
// *Digest_Sha512
@@ -4037,11 +3977,9 @@ type Digest struct {
func (x *Digest) Reset() {
*x = Digest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Digest) String() string {
@@ -4052,7 +3990,7 @@ func (*Digest) ProtoMessage() {}
func (x *Digest) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4141,11 +4079,9 @@ type LocationMetadata struct {
func (x *LocationMetadata) Reset() {
*x = LocationMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LocationMetadata) String() string {
@@ -4156,7 +4092,7 @@ func (*LocationMetadata) ProtoMessage() {}
func (x *LocationMetadata) ProtoReflect() protoreflect.Message {
mi := &file_google_cloud_kms_v1_service_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4331,905 +4267,911 @@ var file_google_cloud_kms_v1_service_proto_rawDesc = []byte{
0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
- 0x5b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x13,
- 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0xbc, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5f, 0x0a,
+ 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
+ 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x70,
+ 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x54,
+ 0x0a, 0x13, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b,
+ 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x23, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e,
+ 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x52, 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x22, 0x89, 0x02, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f,
+ 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
+ 0x27, 0x0a, 0x0d, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x1d,
+ 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x1a, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0xbc, 0x01, 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
- 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x23,
- 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
- 0x67, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52,
- 0x69, 0x6e, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
- 0x67, 0x22, 0x89, 0x02, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65,
- 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a,
- 0x0d, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x1d, 0x73, 0x6b,
- 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x1a, 0x73, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01,
- 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b,
+ 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbe,
+ 0x03, 0x0a, 0x1d, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x30, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbe, 0x03, 0x0a,
- 0x1d, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
- 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x12, 0x5e, 0x0a, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0,
- 0x41, 0x01, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f,
- 0x6a, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x0a, 0x0b, 0x77, 0x72, 0x61,
- 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12,
- 0x34, 0x0a, 0x13, 0x72, 0x73, 0x61, 0x5f, 0x61, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70,
- 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x48, 0x00, 0x52, 0x10, 0x72, 0x73, 0x61, 0x41, 0x65, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70,
- 0x65, 0x64, 0x4b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0xc6, 0x01,
- 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f,
- 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
- 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e,
- 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0d, 0x69, 0x6d, 0x70,
- 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62,
- 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70,
- 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x69, 0x6d, 0x70,
- 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x61, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x22, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72,
+ 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x09, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x0a, 0x0b, 0x77,
+ 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x4b, 0x65,
+ 0x79, 0x12, 0x34, 0x0a, 0x13, 0x72, 0x73, 0x61, 0x5f, 0x61, 0x65, 0x73, 0x5f, 0x77, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x10, 0x72, 0x73, 0x61, 0x41, 0x65, 0x73, 0x57, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x42, 0x16, 0x0a, 0x14, 0x77, 0x72, 0x61, 0x70, 0x70,
+ 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22,
+ 0xc6, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x21, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0d, 0x69,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a,
+ 0x6f, 0x62, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a,
+ 0x6f, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x69,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x9e, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0a, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbb, 0x01, 0x0a, 0x1d, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbb, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x12, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x10, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61,
- 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x9d, 0x01, 0x0a, 0x24, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d,
- 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a,
- 0x15, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x1e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x66, 0x0a,
- 0x1e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xdb, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01,
- 0x2a, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e,
- 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44,
- 0x61, 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74,
- 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63,
- 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61,
- 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74,
- 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01,
- 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68,
- 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x22, 0xff, 0x02, 0x0a, 0x0e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74,
- 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a,
- 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44,
- 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78,
- 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01,
- 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33,
+ 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x9d, 0x01, 0x0a, 0x24, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61,
+ 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x36, 0x0a, 0x15, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x1e, 0x44, 0x65, 0x73, 0x74, 0x72,
+ 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a,
+ 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
+ 0x66, 0x0a, 0x1e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xdb, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03,
+ 0x0a, 0x01, 0x2a, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d,
+ 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33,
0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
+ 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0,
0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75,
0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xf6, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x70, 0x6c,
- 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41,
- 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74,
- 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x70, 0x6c,
- 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a,
- 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68,
- 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e,
- 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61,
- 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63,
- 0x12, 0x38, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x62, 0x0a, 0x1c, 0x69, 0x6e,
- 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xff, 0x02, 0x0a, 0x0e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65,
+ 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d,
+ 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74,
+ 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x99,
- 0x04, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a,
- 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
- 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
- 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b,
- 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x15, 0x69,
- 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56,
- 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e,
- 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x67, 0x4c, 0x65,
- 0x6e, 0x67, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
- 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41,
- 0x01, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
- 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
- 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41,
- 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x62, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
- 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f,
- 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
- 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a,
- 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xba, 0x02, 0x0a, 0x15, 0x41,
- 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x06, 0x64, 0x69,
- 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x64, 0x69,
- 0x67, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x41, 0x01, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74,
+ 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xf6, 0x03, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09,
+ 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
+ 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x4b, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f,
+ 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f,
+ 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12,
+ 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75,
+ 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
+ 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33,
+ 0x32, 0x63, 0x12, 0x38, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x62, 0x0a, 0x1c,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63,
+ 0x22, 0x99, 0x04, 0x0a, 0x11, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x23, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72,
+ 0x74, 0x65, 0x78, 0x74, 0x12, 0x47, 0x0a, 0x1d, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68,
+ 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a,
+ 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x67,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x71, 0x0a, 0x24, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x21, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
+ 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61,
+ 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x62, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xba, 0x02, 0x0a,
+ 0x15, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x06,
+ 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06,
+ 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74,
+ 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x0c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x17, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e,
- 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x64,
- 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x04,
- 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36,
- 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74,
- 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xd4, 0x01, 0x0a, 0x18, 0x41, 0x73, 0x79, 0x6d,
- 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x63, 0x69,
- 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12,
- 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74,
- 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x63, 0x69,
- 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xb2,
- 0x01, 0x0a, 0x0e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
- 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61,
- 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17,
- 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f,
- 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64,
+ 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xd4, 0x01, 0x0a, 0x18, 0x41, 0x73,
+ 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0a,
+ 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78,
+ 0x74, 0x12, 0x4d, 0x0a, 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f,
+ 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
- 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a,
- 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61,
- 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03, 0x6d, 0x61,
- 0x63, 0x12, 0x3f, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33,
- 0x32, 0x63, 0x22, 0xac, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52,
- 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a,
- 0x0c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74, 0x65, 0x73,
- 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c,
- 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65,
- 0x6c, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70,
- 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63,
- 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x63, 0x69, 0x70,
- 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f,
- 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12,
- 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65,
- 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76,
- 0x65, 0x6c, 0x22, 0xeb, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74,
- 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e,
- 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10,
+ 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63,
+ 0x22, 0xb2, 0x01, 0x0a, 0x0e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
+ 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72,
+ 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a,
+ 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x61, 0x74,
+ 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61,
- 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x21, 0x0a, 0x0c,
- 0x75, 0x73, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12,
- 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65,
- 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52,
- 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x22, 0x87, 0x05, 0x0a, 0x12, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65,
- 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x69, 0x70,
- 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x33, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69,
- 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69,
- 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a,
- 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x09, 0x74, 0x61, 0x67, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x48, 0x0a, 0x11, 0x63,
+ 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x15, 0x0a, 0x03,
+ 0x6d, 0x61, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x03,
+ 0x6d, 0x61, 0x63, 0x12, 0x3f, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
+ 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x22, 0xac, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x79, 0x74,
+ 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63,
+ 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x48, 0x0a, 0x11, 0x63,
0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x5d, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x5f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33,
+ 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32,
+ 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33,
+ 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68,
+ 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x22, 0xeb, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x21,
+ 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
+ 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x22, 0x87, 0x05, 0x0a, 0x12, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x69, 0x70,
+ 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63,
+ 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x12, 0x33, 0x0a, 0x15, 0x69, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
+ 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x09, 0x74, 0x61, 0x67, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x48, 0x0a,
+ 0x11, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33,
+ 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78,
+ 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x5d, 0x0a, 0x1c, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x5f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x64, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33,
+ 0x32, 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61,
+ 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74,
+ 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
+ 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x0a, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x49, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70,
+ 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
+ 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f,
+ 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xbe, 0x03, 0x0a,
+ 0x12, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78,
+ 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e,
- 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
- 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x5f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x50, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63,
- 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65,
- 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33,
- 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x69,
- 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69,
- 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f,
- 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20,
+ 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f,
+ 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63,
0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xbe, 0x03, 0x0a, 0x12, 0x52,
- 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12,
- 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63,
- 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36,
- 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x3c, 0x0a, 0x1a, 0x76, 0x65, 0x72, 0x69,
- 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f,
- 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x76, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
- 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x75,
- 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
- 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x29, 0x76,
- 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61,
- 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x44, 0x61,
- 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65, 0x72, 0x69,
- 0x66, 0x69, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56,
- 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xcb, 0x02, 0x0a, 0x16,
- 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x34, 0x0a, 0x16,
- 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f,
- 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x76, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72, 0x63, 0x33,
- 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61,
- 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x90, 0x02, 0x0a, 0x19, 0x41, 0x73,
- 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x69, 0x6e,
- 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x69,
- 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65,
- 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x70, 0x6c,
- 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a,
- 0x1a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72,
- 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65,
- 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x3c, 0x0a, 0x1a, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78,
+ 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65,
+ 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x60, 0x0a, 0x2d, 0x76, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x29, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x51, 0x0a, 0x25, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x22, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xcb, 0x02,
+ 0x0a, 0x16, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x73,
+ 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x34,
+ 0x0a, 0x16, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73,
+ 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x43, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
+ 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72,
+ 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74,
+ 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x90, 0x02, 0x0a, 0x19,
+ 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6c,
+ 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x70, 0x6c, 0x61, 0x69, 0x6e,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f,
+ 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12,
+ 0x3c, 0x0a, 0x1a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x70, 0x68,
+ 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x69, 0x70,
+ 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a,
+ 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70,
+ 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xf6,
+ 0x01, 0x0a, 0x0f, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f,
+ 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
+ 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x56,
+ 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x76,
+ 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63,
+ 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x69, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x2e, 0x0a,
+ 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72,
+ 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x65, 0x64, 0x4d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a,
+ 0x1a, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70,
0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74,
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f,
- 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xf6, 0x01, 0x0a,
- 0x0f, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x03, 0x6d, 0x61, 0x63, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74,
- 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33,
- 0x32, 0x63, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
+ 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x6f, 0x0a, 0x1b,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79,
+ 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x3c, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x60, 0x0a,
+ 0x06, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35,
+ 0x36, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35,
+ 0x36, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x12, 0x18, 0x0a, 0x06, 0x73,
+ 0x68, 0x61, 0x35, 0x31, 0x32, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73,
+ 0x68, 0x61, 0x35, 0x31, 0x32, 0x42, 0x08, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22,
+ 0x5c, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x73, 0x6d, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c,
+ 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x73, 0x6d, 0x41,
+ 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6b, 0x6d, 0x5f,
+ 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0c, 0x65, 0x6b, 0x6d, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x32, 0x90, 0x2e,
+ 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4b,
+ 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e,
+ 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xb2, 0x02, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72,
- 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x65, 0x72,
- 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65,
- 0x64, 0x44, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76,
- 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6d, 0x61, 0x63, 0x5f, 0x63, 0x72, 0x63, 0x33,
- 0x32, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x4d, 0x61, 0x63, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12, 0x3c, 0x0a, 0x1a, 0x76,
- 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x18, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x49, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x69, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f,
- 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x6f, 0x0a, 0x1b, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3c, 0x0a,
- 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x60, 0x0a, 0x06, 0x44,
- 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x12,
- 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48,
- 0x00, 0x52, 0x06, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x68, 0x61,
- 0x35, 0x31, 0x32, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x68, 0x61,
- 0x35, 0x31, 0x32, 0x42, 0x08, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x5c, 0x0a,
- 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x73, 0x6d, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62,
- 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x73, 0x6d, 0x41, 0x76, 0x61,
- 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6b, 0x6d, 0x5f, 0x61, 0x76,
- 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x65,
- 0x6b, 0x6d, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x32, 0x90, 0x2e, 0x0a, 0x14,
- 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79,
- 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e,
- 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69,
- 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79,
+ 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x73, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
+ 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70,
+ 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d,
+ 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x4a, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x8f, 0x01, 0x0a,
+ 0x0a, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e,
+ 0x67, 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e,
+ 0x12, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2,
+ 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69,
- 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x73, 0x12, 0xde, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f,
+ 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
- 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x4f, 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0xc0, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
+ 0x65, 0x79, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0xda, 0x41,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65,
+ 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69,
+ 0x63, 0x4b, 0x65, 0x79, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f,
+ 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
+ 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22,
+ 0x48, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39,
+ 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f,
+ 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79,
+ 0x52, 0x69, 0x6e, 0x67, 0x22, 0x5c, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
+ 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f,
+ 0x72, 0x69, 0x6e, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f,
+ 0x72, 0x69, 0x6e, 0x67, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
+ 0x67, 0x73, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x0a, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65,
+ 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x73, 0x12, 0xfb, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0xda, 0x41, 0x19,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x63, 0x3a,
+ 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d,
0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f,
- 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a,
- 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12,
- 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x8f, 0x01, 0x0a, 0x0a, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x22,
- 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c,
- 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a,
- 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a,
- 0x0c, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x48, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
- 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
- 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a,
- 0x7d, 0x12, 0xcb, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
- 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x47, 0x65, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x22, 0x5c, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4f,
- 0x12, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0xc0, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
- 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
- 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x66, 0xda, 0x41, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x12, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52,
- 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0xa2, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4a, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x70,
- 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x48, 0xda,
- 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x12, 0x39, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b,
- 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb6, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x69,
- 0x6e, 0x67, 0x22, 0x5c, 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65,
- 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x64, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69,
- 0x6e, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69,
- 0x6e, 0x67, 0x22, 0x2c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
- 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x0a, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52,
- 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x73, 0x12, 0xfb, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79,
+ 0x6e, 0x73, 0x12, 0xd4, 0x01, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79,
0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x85, 0x01, 0xda, 0x41, 0x19, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x63, 0x3a, 0x12, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x22, 0x4d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a,
- 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0xd4, 0x01, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59, 0x3a, 0x01,
- 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a,
- 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x3a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f,
- 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49,
- 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x69,
- 0x64, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x47, 0x3a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x22, 0x39,
- 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69,
- 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x0f, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x2b, 0x2e,
+ 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x59,
+ 0x3a, 0x01, 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x3a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xcf, 0x01, 0x0a, 0x0f, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x2b, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0xda, 0x41, 0x16, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52, 0x3a, 0x0a, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
+ 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x22, 0x6f, 0xda, 0x41, 0x1f, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62,
+ 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x0a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62,
+ 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x02,
- 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67,
+ 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x0f,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12,
+ 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f,
- 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x3a, 0x12, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x32, 0x60, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x22, 0x71, 0xda, 0x41,
+ 0x16, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52, 0x3a, 0x0a, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x32, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
+ 0x93, 0x02, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x9d, 0x01, 0xda, 0x41, 0x1e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x76, 0x3a,
+ 0x12, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x32, 0x60, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
+ 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69,
+ 0x6d, 0x61, 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x22, 0x76, 0xda, 0x41, 0x1a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x01, 0x2a, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79,
+ 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d,
+ 0x61, 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44,
+ 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73,
+ 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f,
0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63,
0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x2f, 0x2a, 0x7d, 0x12, 0xf2, 0x01, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x50, 0x72, 0x69, 0x6d, 0x61,
- 0x72, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
- 0x22, 0x76, 0xda, 0x41, 0x1a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x53, 0x3a, 0x01, 0x2a, 0x22, 0x4e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
- 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69,
- 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73,
- 0x2f, 0x2a, 0x7d, 0x3a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x73,
- 0x74, 0x72, 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x72,
- 0x6f, 0x79, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x3a,
- 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a,
- 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x52, 0x65,
+ 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0xde, 0x01, 0x0a, 0x17,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
0x73, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5a,
- 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a,
- 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
- 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x45,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61, 0x69, 0x6e,
- 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f,
- 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0x67, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xb4, 0x01, 0x0a,
+ 0x07, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0f, 0x6e, 0x61, 0x6d,
- 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a, 0x01, 0x2a, 0x22,
+ 0x42, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12,
+ 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0xda, 0x41, 0x0f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
+ 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x3a, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x0a, 0x52,
+ 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x5d, 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
- 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x7d,
- 0x3a, 0x64, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x77,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61,
- 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b,
- 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5d,
- 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a,
- 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
- 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01,
- 0x0a, 0x0a, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26, 0x2e, 0x67,
+ 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12,
+ 0xc2, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77,
+ 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5d, 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79,
+ 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x44, 0x65, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73,
+ 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x75, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x61, 0x3a, 0x01, 0x2a, 0x22, 0x5c, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xf0, 0x01, 0x0a, 0x11, 0x41, 0x73, 0x79, 0x6d,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x2d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, 0x77, 0x44, 0x65,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x5d, 0x3a, 0x01, 0x2a, 0x22, 0x58, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
+ 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0xda, 0x41,
+ 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x64, 0x3a, 0x01, 0x2a, 0x22, 0x5f, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x07, 0x4d,
+ 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63,
+ 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x6c, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c,
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69,
0x6e, 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73,
0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x61, 0x77, 0x44, 0x65, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x12, 0xe0, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d,
- 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72,
- 0x69, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x75,
- 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x61, 0x3a, 0x01, 0x2a, 0x22, 0x5c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
- 0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f,
- 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xf0, 0x01, 0x0a, 0x11, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65,
- 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0x2d, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x41, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0xda, 0x41, 0x0f, 0x6e,
- 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x74, 0x65, 0x78, 0x74, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x64, 0x3a, 0x01, 0x2a, 0x22, 0x5f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12,
+ 0xce, 0x01, 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x25, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65,
+ 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x72, 0xda, 0x41,
+ 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x6d, 0x61, 0x63, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x5c, 0x3a, 0x01, 0x2a, 0x22, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e,
0x67, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f,
0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x44, 0x65, 0x63, 0x72, 0x79, 0x70, 0x74, 0x12, 0xc2, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x63,
- 0x53, 0x69, 0x67, 0x6e, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x53, 0x69,
- 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79,
+ 0x12, 0xe7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e,
+ 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74,
+ 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x4d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x6c, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x5a, 0x3a, 0x01, 0x2a, 0x22, 0x55, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
- 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
- 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a,
- 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x53, 0x69, 0x67, 0x6e, 0x12, 0xce, 0x01,
- 0x0a, 0x09, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x25, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69,
- 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x72, 0xda, 0x41, 0x0d, 0x6e,
- 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x6d, 0x61, 0x63, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x5c, 0x3a, 0x01, 0x2a, 0x22, 0x57, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73,
- 0x2f, 0x2a, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x2a, 0x2f,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x61, 0x63, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0xe7,
- 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f,
- 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0xda, 0x41, 0x26, 0x6c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x62, 0x79,
- 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c,
- 0x65, 0x76, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22, 0x39, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e,
- 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
- 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61,
- 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
- 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x7f,
- 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x4b, 0x6d, 0x73, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61,
- 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62,
- 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79,
+ 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0xda, 0x41, 0x26,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f,
+ 0x62, 0x79, 0x74, 0x65, 0x73, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x3a, 0x01, 0x2a, 0x22,
+ 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52,
+ 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x42, 0x79, 0x74, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70,
+ 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
+ 0x42, 0x7c, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x4b, 0x6d, 0x73,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73,
+ 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73,
+ 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -5245,7 +5187,7 @@ func file_google_cloud_kms_v1_service_proto_rawDescGZIP() []byte {
}
var file_google_cloud_kms_v1_service_proto_msgTypes = make([]protoimpl.MessageInfo, 43)
-var file_google_cloud_kms_v1_service_proto_goTypes = []interface{}{
+var file_google_cloud_kms_v1_service_proto_goTypes = []any{
(*ListKeyRingsRequest)(nil), // 0: google.cloud.kms.v1.ListKeyRingsRequest
(*ListCryptoKeysRequest)(nil), // 1: google.cloud.kms.v1.ListCryptoKeysRequest
(*ListCryptoKeyVersionsRequest)(nil), // 2: google.cloud.kms.v1.ListCryptoKeyVersionsRequest
@@ -5294,11 +5236,12 @@ var file_google_cloud_kms_v1_service_proto_goTypes = []interface{}{
(*CryptoKey)(nil), // 45: google.cloud.kms.v1.CryptoKey
(*CryptoKeyVersion)(nil), // 46: google.cloud.kms.v1.CryptoKeyVersion
(*ImportJob)(nil), // 47: google.cloud.kms.v1.ImportJob
- (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 48: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- (*fieldmaskpb.FieldMask)(nil), // 49: google.protobuf.FieldMask
- (*wrapperspb.Int64Value)(nil), // 50: google.protobuf.Int64Value
- (ProtectionLevel)(0), // 51: google.cloud.kms.v1.ProtectionLevel
- (*PublicKey)(nil), // 52: google.cloud.kms.v1.PublicKey
+ (PublicKey_PublicKeyFormat)(0), // 48: google.cloud.kms.v1.PublicKey.PublicKeyFormat
+ (CryptoKeyVersion_CryptoKeyVersionAlgorithm)(0), // 49: google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ (*fieldmaskpb.FieldMask)(nil), // 50: google.protobuf.FieldMask
+ (*wrapperspb.Int64Value)(nil), // 51: google.protobuf.Int64Value
+ (ProtectionLevel)(0), // 52: google.cloud.kms.v1.ProtectionLevel
+ (*PublicKey)(nil), // 53: google.cloud.kms.v1.PublicKey
}
var file_google_cloud_kms_v1_service_proto_depIdxs = []int32{
43, // 0: google.cloud.kms.v1.ListCryptoKeysRequest.version_view:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionView
@@ -5307,111 +5250,112 @@ var file_google_cloud_kms_v1_service_proto_depIdxs = []int32{
45, // 3: google.cloud.kms.v1.ListCryptoKeysResponse.crypto_keys:type_name -> google.cloud.kms.v1.CryptoKey
46, // 4: google.cloud.kms.v1.ListCryptoKeyVersionsResponse.crypto_key_versions:type_name -> google.cloud.kms.v1.CryptoKeyVersion
47, // 5: google.cloud.kms.v1.ListImportJobsResponse.import_jobs:type_name -> google.cloud.kms.v1.ImportJob
- 44, // 6: google.cloud.kms.v1.CreateKeyRingRequest.key_ring:type_name -> google.cloud.kms.v1.KeyRing
- 45, // 7: google.cloud.kms.v1.CreateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey
- 46, // 8: google.cloud.kms.v1.CreateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion
- 48, // 9: google.cloud.kms.v1.ImportCryptoKeyVersionRequest.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
- 47, // 10: google.cloud.kms.v1.CreateImportJobRequest.import_job:type_name -> google.cloud.kms.v1.ImportJob
- 45, // 11: google.cloud.kms.v1.UpdateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey
- 49, // 12: google.cloud.kms.v1.UpdateCryptoKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
- 46, // 13: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion
- 49, // 14: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.update_mask:type_name -> google.protobuf.FieldMask
- 50, // 15: google.cloud.kms.v1.EncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 16: google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 17: google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 18: google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 19: google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 20: google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 21: google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 22: google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 23: google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 24: google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
- 41, // 25: google.cloud.kms.v1.AsymmetricSignRequest.digest:type_name -> google.cloud.kms.v1.Digest
- 50, // 26: google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 27: google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 28: google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 29: google.cloud.kms.v1.MacSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 30: google.cloud.kms.v1.MacVerifyRequest.data_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 31: google.cloud.kms.v1.MacVerifyRequest.mac_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 32: google.cloud.kms.v1.GenerateRandomBytesRequest.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 33: google.cloud.kms.v1.EncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 34: google.cloud.kms.v1.EncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 35: google.cloud.kms.v1.DecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 36: google.cloud.kms.v1.DecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 37: google.cloud.kms.v1.RawEncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
- 50, // 38: google.cloud.kms.v1.RawEncryptResponse.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 39: google.cloud.kms.v1.RawEncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 40: google.cloud.kms.v1.RawDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 41: google.cloud.kms.v1.RawDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 42: google.cloud.kms.v1.AsymmetricSignResponse.signature_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 43: google.cloud.kms.v1.AsymmetricSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 44: google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 45: google.cloud.kms.v1.AsymmetricDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 46: google.cloud.kms.v1.MacSignResponse.mac_crc32c:type_name -> google.protobuf.Int64Value
- 51, // 47: google.cloud.kms.v1.MacSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 51, // 48: google.cloud.kms.v1.MacVerifyResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
- 50, // 49: google.cloud.kms.v1.GenerateRandomBytesResponse.data_crc32c:type_name -> google.protobuf.Int64Value
- 0, // 50: google.cloud.kms.v1.KeyManagementService.ListKeyRings:input_type -> google.cloud.kms.v1.ListKeyRingsRequest
- 1, // 51: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:input_type -> google.cloud.kms.v1.ListCryptoKeysRequest
- 2, // 52: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:input_type -> google.cloud.kms.v1.ListCryptoKeyVersionsRequest
- 3, // 53: google.cloud.kms.v1.KeyManagementService.ListImportJobs:input_type -> google.cloud.kms.v1.ListImportJobsRequest
- 8, // 54: google.cloud.kms.v1.KeyManagementService.GetKeyRing:input_type -> google.cloud.kms.v1.GetKeyRingRequest
- 9, // 55: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:input_type -> google.cloud.kms.v1.GetCryptoKeyRequest
- 10, // 56: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:input_type -> google.cloud.kms.v1.GetCryptoKeyVersionRequest
- 11, // 57: google.cloud.kms.v1.KeyManagementService.GetPublicKey:input_type -> google.cloud.kms.v1.GetPublicKeyRequest
- 12, // 58: google.cloud.kms.v1.KeyManagementService.GetImportJob:input_type -> google.cloud.kms.v1.GetImportJobRequest
- 13, // 59: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:input_type -> google.cloud.kms.v1.CreateKeyRingRequest
- 14, // 60: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:input_type -> google.cloud.kms.v1.CreateCryptoKeyRequest
- 15, // 61: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:input_type -> google.cloud.kms.v1.CreateCryptoKeyVersionRequest
- 16, // 62: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:input_type -> google.cloud.kms.v1.ImportCryptoKeyVersionRequest
- 17, // 63: google.cloud.kms.v1.KeyManagementService.CreateImportJob:input_type -> google.cloud.kms.v1.CreateImportJobRequest
- 18, // 64: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:input_type -> google.cloud.kms.v1.UpdateCryptoKeyRequest
- 19, // 65: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyVersionRequest
- 20, // 66: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest
- 21, // 67: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:input_type -> google.cloud.kms.v1.DestroyCryptoKeyVersionRequest
- 22, // 68: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:input_type -> google.cloud.kms.v1.RestoreCryptoKeyVersionRequest
- 23, // 69: google.cloud.kms.v1.KeyManagementService.Encrypt:input_type -> google.cloud.kms.v1.EncryptRequest
- 24, // 70: google.cloud.kms.v1.KeyManagementService.Decrypt:input_type -> google.cloud.kms.v1.DecryptRequest
- 25, // 71: google.cloud.kms.v1.KeyManagementService.RawEncrypt:input_type -> google.cloud.kms.v1.RawEncryptRequest
- 26, // 72: google.cloud.kms.v1.KeyManagementService.RawDecrypt:input_type -> google.cloud.kms.v1.RawDecryptRequest
- 27, // 73: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:input_type -> google.cloud.kms.v1.AsymmetricSignRequest
- 28, // 74: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:input_type -> google.cloud.kms.v1.AsymmetricDecryptRequest
- 29, // 75: google.cloud.kms.v1.KeyManagementService.MacSign:input_type -> google.cloud.kms.v1.MacSignRequest
- 30, // 76: google.cloud.kms.v1.KeyManagementService.MacVerify:input_type -> google.cloud.kms.v1.MacVerifyRequest
- 31, // 77: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:input_type -> google.cloud.kms.v1.GenerateRandomBytesRequest
- 4, // 78: google.cloud.kms.v1.KeyManagementService.ListKeyRings:output_type -> google.cloud.kms.v1.ListKeyRingsResponse
- 5, // 79: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:output_type -> google.cloud.kms.v1.ListCryptoKeysResponse
- 6, // 80: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:output_type -> google.cloud.kms.v1.ListCryptoKeyVersionsResponse
- 7, // 81: google.cloud.kms.v1.KeyManagementService.ListImportJobs:output_type -> google.cloud.kms.v1.ListImportJobsResponse
- 44, // 82: google.cloud.kms.v1.KeyManagementService.GetKeyRing:output_type -> google.cloud.kms.v1.KeyRing
- 45, // 83: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
- 46, // 84: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 52, // 85: google.cloud.kms.v1.KeyManagementService.GetPublicKey:output_type -> google.cloud.kms.v1.PublicKey
- 47, // 86: google.cloud.kms.v1.KeyManagementService.GetImportJob:output_type -> google.cloud.kms.v1.ImportJob
- 44, // 87: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:output_type -> google.cloud.kms.v1.KeyRing
- 45, // 88: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
- 46, // 89: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 46, // 90: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 47, // 91: google.cloud.kms.v1.KeyManagementService.CreateImportJob:output_type -> google.cloud.kms.v1.ImportJob
- 45, // 92: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
- 46, // 93: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 45, // 94: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:output_type -> google.cloud.kms.v1.CryptoKey
- 46, // 95: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 46, // 96: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
- 32, // 97: google.cloud.kms.v1.KeyManagementService.Encrypt:output_type -> google.cloud.kms.v1.EncryptResponse
- 33, // 98: google.cloud.kms.v1.KeyManagementService.Decrypt:output_type -> google.cloud.kms.v1.DecryptResponse
- 34, // 99: google.cloud.kms.v1.KeyManagementService.RawEncrypt:output_type -> google.cloud.kms.v1.RawEncryptResponse
- 35, // 100: google.cloud.kms.v1.KeyManagementService.RawDecrypt:output_type -> google.cloud.kms.v1.RawDecryptResponse
- 36, // 101: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:output_type -> google.cloud.kms.v1.AsymmetricSignResponse
- 37, // 102: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:output_type -> google.cloud.kms.v1.AsymmetricDecryptResponse
- 38, // 103: google.cloud.kms.v1.KeyManagementService.MacSign:output_type -> google.cloud.kms.v1.MacSignResponse
- 39, // 104: google.cloud.kms.v1.KeyManagementService.MacVerify:output_type -> google.cloud.kms.v1.MacVerifyResponse
- 40, // 105: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:output_type -> google.cloud.kms.v1.GenerateRandomBytesResponse
- 78, // [78:106] is the sub-list for method output_type
- 50, // [50:78] is the sub-list for method input_type
- 50, // [50:50] is the sub-list for extension type_name
- 50, // [50:50] is the sub-list for extension extendee
- 0, // [0:50] is the sub-list for field type_name
+ 48, // 6: google.cloud.kms.v1.GetPublicKeyRequest.public_key_format:type_name -> google.cloud.kms.v1.PublicKey.PublicKeyFormat
+ 44, // 7: google.cloud.kms.v1.CreateKeyRingRequest.key_ring:type_name -> google.cloud.kms.v1.KeyRing
+ 45, // 8: google.cloud.kms.v1.CreateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey
+ 46, // 9: google.cloud.kms.v1.CreateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion
+ 49, // 10: google.cloud.kms.v1.ImportCryptoKeyVersionRequest.algorithm:type_name -> google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm
+ 47, // 11: google.cloud.kms.v1.CreateImportJobRequest.import_job:type_name -> google.cloud.kms.v1.ImportJob
+ 45, // 12: google.cloud.kms.v1.UpdateCryptoKeyRequest.crypto_key:type_name -> google.cloud.kms.v1.CryptoKey
+ 50, // 13: google.cloud.kms.v1.UpdateCryptoKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 46, // 14: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.crypto_key_version:type_name -> google.cloud.kms.v1.CryptoKeyVersion
+ 50, // 15: google.cloud.kms.v1.UpdateCryptoKeyVersionRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 51, // 16: google.cloud.kms.v1.EncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 17: google.cloud.kms.v1.EncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 18: google.cloud.kms.v1.DecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 19: google.cloud.kms.v1.DecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 20: google.cloud.kms.v1.RawEncryptRequest.plaintext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 21: google.cloud.kms.v1.RawEncryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 22: google.cloud.kms.v1.RawEncryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 23: google.cloud.kms.v1.RawDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 24: google.cloud.kms.v1.RawDecryptRequest.additional_authenticated_data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 25: google.cloud.kms.v1.RawDecryptRequest.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
+ 41, // 26: google.cloud.kms.v1.AsymmetricSignRequest.digest:type_name -> google.cloud.kms.v1.Digest
+ 51, // 27: google.cloud.kms.v1.AsymmetricSignRequest.digest_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 28: google.cloud.kms.v1.AsymmetricSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 29: google.cloud.kms.v1.AsymmetricDecryptRequest.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 30: google.cloud.kms.v1.MacSignRequest.data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 31: google.cloud.kms.v1.MacVerifyRequest.data_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 32: google.cloud.kms.v1.MacVerifyRequest.mac_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 33: google.cloud.kms.v1.GenerateRandomBytesRequest.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 34: google.cloud.kms.v1.EncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 35: google.cloud.kms.v1.EncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 36: google.cloud.kms.v1.DecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 37: google.cloud.kms.v1.DecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 38: google.cloud.kms.v1.RawEncryptResponse.ciphertext_crc32c:type_name -> google.protobuf.Int64Value
+ 51, // 39: google.cloud.kms.v1.RawEncryptResponse.initialization_vector_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 40: google.cloud.kms.v1.RawEncryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 41: google.cloud.kms.v1.RawDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 42: google.cloud.kms.v1.RawDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 43: google.cloud.kms.v1.AsymmetricSignResponse.signature_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 44: google.cloud.kms.v1.AsymmetricSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 45: google.cloud.kms.v1.AsymmetricDecryptResponse.plaintext_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 46: google.cloud.kms.v1.AsymmetricDecryptResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 47: google.cloud.kms.v1.MacSignResponse.mac_crc32c:type_name -> google.protobuf.Int64Value
+ 52, // 48: google.cloud.kms.v1.MacSignResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 52, // 49: google.cloud.kms.v1.MacVerifyResponse.protection_level:type_name -> google.cloud.kms.v1.ProtectionLevel
+ 51, // 50: google.cloud.kms.v1.GenerateRandomBytesResponse.data_crc32c:type_name -> google.protobuf.Int64Value
+ 0, // 51: google.cloud.kms.v1.KeyManagementService.ListKeyRings:input_type -> google.cloud.kms.v1.ListKeyRingsRequest
+ 1, // 52: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:input_type -> google.cloud.kms.v1.ListCryptoKeysRequest
+ 2, // 53: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:input_type -> google.cloud.kms.v1.ListCryptoKeyVersionsRequest
+ 3, // 54: google.cloud.kms.v1.KeyManagementService.ListImportJobs:input_type -> google.cloud.kms.v1.ListImportJobsRequest
+ 8, // 55: google.cloud.kms.v1.KeyManagementService.GetKeyRing:input_type -> google.cloud.kms.v1.GetKeyRingRequest
+ 9, // 56: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:input_type -> google.cloud.kms.v1.GetCryptoKeyRequest
+ 10, // 57: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:input_type -> google.cloud.kms.v1.GetCryptoKeyVersionRequest
+ 11, // 58: google.cloud.kms.v1.KeyManagementService.GetPublicKey:input_type -> google.cloud.kms.v1.GetPublicKeyRequest
+ 12, // 59: google.cloud.kms.v1.KeyManagementService.GetImportJob:input_type -> google.cloud.kms.v1.GetImportJobRequest
+ 13, // 60: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:input_type -> google.cloud.kms.v1.CreateKeyRingRequest
+ 14, // 61: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:input_type -> google.cloud.kms.v1.CreateCryptoKeyRequest
+ 15, // 62: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:input_type -> google.cloud.kms.v1.CreateCryptoKeyVersionRequest
+ 16, // 63: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:input_type -> google.cloud.kms.v1.ImportCryptoKeyVersionRequest
+ 17, // 64: google.cloud.kms.v1.KeyManagementService.CreateImportJob:input_type -> google.cloud.kms.v1.CreateImportJobRequest
+ 18, // 65: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:input_type -> google.cloud.kms.v1.UpdateCryptoKeyRequest
+ 19, // 66: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyVersionRequest
+ 20, // 67: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:input_type -> google.cloud.kms.v1.UpdateCryptoKeyPrimaryVersionRequest
+ 21, // 68: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:input_type -> google.cloud.kms.v1.DestroyCryptoKeyVersionRequest
+ 22, // 69: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:input_type -> google.cloud.kms.v1.RestoreCryptoKeyVersionRequest
+ 23, // 70: google.cloud.kms.v1.KeyManagementService.Encrypt:input_type -> google.cloud.kms.v1.EncryptRequest
+ 24, // 71: google.cloud.kms.v1.KeyManagementService.Decrypt:input_type -> google.cloud.kms.v1.DecryptRequest
+ 25, // 72: google.cloud.kms.v1.KeyManagementService.RawEncrypt:input_type -> google.cloud.kms.v1.RawEncryptRequest
+ 26, // 73: google.cloud.kms.v1.KeyManagementService.RawDecrypt:input_type -> google.cloud.kms.v1.RawDecryptRequest
+ 27, // 74: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:input_type -> google.cloud.kms.v1.AsymmetricSignRequest
+ 28, // 75: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:input_type -> google.cloud.kms.v1.AsymmetricDecryptRequest
+ 29, // 76: google.cloud.kms.v1.KeyManagementService.MacSign:input_type -> google.cloud.kms.v1.MacSignRequest
+ 30, // 77: google.cloud.kms.v1.KeyManagementService.MacVerify:input_type -> google.cloud.kms.v1.MacVerifyRequest
+ 31, // 78: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:input_type -> google.cloud.kms.v1.GenerateRandomBytesRequest
+ 4, // 79: google.cloud.kms.v1.KeyManagementService.ListKeyRings:output_type -> google.cloud.kms.v1.ListKeyRingsResponse
+ 5, // 80: google.cloud.kms.v1.KeyManagementService.ListCryptoKeys:output_type -> google.cloud.kms.v1.ListCryptoKeysResponse
+ 6, // 81: google.cloud.kms.v1.KeyManagementService.ListCryptoKeyVersions:output_type -> google.cloud.kms.v1.ListCryptoKeyVersionsResponse
+ 7, // 82: google.cloud.kms.v1.KeyManagementService.ListImportJobs:output_type -> google.cloud.kms.v1.ListImportJobsResponse
+ 44, // 83: google.cloud.kms.v1.KeyManagementService.GetKeyRing:output_type -> google.cloud.kms.v1.KeyRing
+ 45, // 84: google.cloud.kms.v1.KeyManagementService.GetCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
+ 46, // 85: google.cloud.kms.v1.KeyManagementService.GetCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 53, // 86: google.cloud.kms.v1.KeyManagementService.GetPublicKey:output_type -> google.cloud.kms.v1.PublicKey
+ 47, // 87: google.cloud.kms.v1.KeyManagementService.GetImportJob:output_type -> google.cloud.kms.v1.ImportJob
+ 44, // 88: google.cloud.kms.v1.KeyManagementService.CreateKeyRing:output_type -> google.cloud.kms.v1.KeyRing
+ 45, // 89: google.cloud.kms.v1.KeyManagementService.CreateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
+ 46, // 90: google.cloud.kms.v1.KeyManagementService.CreateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 46, // 91: google.cloud.kms.v1.KeyManagementService.ImportCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 47, // 92: google.cloud.kms.v1.KeyManagementService.CreateImportJob:output_type -> google.cloud.kms.v1.ImportJob
+ 45, // 93: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKey:output_type -> google.cloud.kms.v1.CryptoKey
+ 46, // 94: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 45, // 95: google.cloud.kms.v1.KeyManagementService.UpdateCryptoKeyPrimaryVersion:output_type -> google.cloud.kms.v1.CryptoKey
+ 46, // 96: google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 46, // 97: google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion:output_type -> google.cloud.kms.v1.CryptoKeyVersion
+ 32, // 98: google.cloud.kms.v1.KeyManagementService.Encrypt:output_type -> google.cloud.kms.v1.EncryptResponse
+ 33, // 99: google.cloud.kms.v1.KeyManagementService.Decrypt:output_type -> google.cloud.kms.v1.DecryptResponse
+ 34, // 100: google.cloud.kms.v1.KeyManagementService.RawEncrypt:output_type -> google.cloud.kms.v1.RawEncryptResponse
+ 35, // 101: google.cloud.kms.v1.KeyManagementService.RawDecrypt:output_type -> google.cloud.kms.v1.RawDecryptResponse
+ 36, // 102: google.cloud.kms.v1.KeyManagementService.AsymmetricSign:output_type -> google.cloud.kms.v1.AsymmetricSignResponse
+ 37, // 103: google.cloud.kms.v1.KeyManagementService.AsymmetricDecrypt:output_type -> google.cloud.kms.v1.AsymmetricDecryptResponse
+ 38, // 104: google.cloud.kms.v1.KeyManagementService.MacSign:output_type -> google.cloud.kms.v1.MacSignResponse
+ 39, // 105: google.cloud.kms.v1.KeyManagementService.MacVerify:output_type -> google.cloud.kms.v1.MacVerifyResponse
+ 40, // 106: google.cloud.kms.v1.KeyManagementService.GenerateRandomBytes:output_type -> google.cloud.kms.v1.GenerateRandomBytesResponse
+ 79, // [79:107] is the sub-list for method output_type
+ 51, // [51:79] is the sub-list for method input_type
+ 51, // [51:51] is the sub-list for extension type_name
+ 51, // [51:51] is the sub-list for extension extendee
+ 0, // [0:51] is the sub-list for field type_name
}
func init() { file_google_cloud_kms_v1_service_proto_init() }
@@ -5420,528 +5364,10 @@ func file_google_cloud_kms_v1_service_proto_init() {
return
}
file_google_cloud_kms_v1_resources_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_cloud_kms_v1_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListKeyRingsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListCryptoKeysRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListCryptoKeyVersionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListImportJobsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListKeyRingsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListCryptoKeysResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListCryptoKeyVersionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListImportJobsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyRingRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetCryptoKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetPublicKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetImportJobRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateKeyRingRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateCryptoKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateImportJobRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCryptoKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateCryptoKeyPrimaryVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DestroyCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreCryptoKeyVersionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EncryptRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DecryptRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RawEncryptRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RawDecryptRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AsymmetricSignRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AsymmetricDecryptRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacSignRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacVerifyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GenerateRandomBytesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EncryptResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DecryptResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RawEncryptResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RawDecryptResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AsymmetricSignResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AsymmetricDecryptResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacSignResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MacVerifyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GenerateRandomBytesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Digest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LocationMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []interface{}{
+ file_google_cloud_kms_v1_service_proto_msgTypes[16].OneofWrappers = []any{
(*ImportCryptoKeyVersionRequest_RsaAesWrappedKey)(nil),
}
- file_google_cloud_kms_v1_service_proto_msgTypes[41].OneofWrappers = []interface{}{
+ file_google_cloud_kms_v1_service_proto_msgTypes[41].OneofWrappers = []any{
(*Digest_Sha256)(nil),
(*Digest_Sha384)(nil),
(*Digest_Sha512)(nil),
diff --git a/vendor/cloud.google.com/go/kms/internal/version.go b/vendor/cloud.google.com/go/kms/internal/version.go
index 30aa7c4bb..b1672963f 100644
--- a/vendor/cloud.google.com/go/kms/internal/version.go
+++ b/vendor/cloud.google.com/go/kms/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.15.5"
+const Version = "1.21.0"
diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md
new file mode 100644
index 000000000..392ccee56
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md
@@ -0,0 +1,168 @@
+# Changes
+
+## [0.6.5](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.4...longrunning/v0.6.5) (2025-03-06)
+
+
+### Bug Fixes
+
+* **longrunning:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec))
+
+## [0.6.4](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.3...longrunning/v0.6.4) (2025-01-02)
+
+
+### Bug Fixes
+
+* **longrunning:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.6.3](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.2...longrunning/v0.6.3) (2024-11-19)
+
+
+### Documentation
+
+* **longrunning:** Clarity and typo fixes for documentation ([c1e936d](https://github.com/googleapis/google-cloud-go/commit/c1e936df6527933f5e7c31be0f95aa46ff2c0e61))
+* **longrunning:** Fix example rpc naming ([c1e936d](https://github.com/googleapis/google-cloud-go/commit/c1e936df6527933f5e7c31be0f95aa46ff2c0e61))
+
+## [0.6.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.1...longrunning/v0.6.2) (2024-10-23)
+
+
+### Bug Fixes
+
+* **longrunning:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **longrunning:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+
+## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.6.0...longrunning/v0.6.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.12...longrunning/v0.6.0) (2024-08-20)
+
+
+### Features
+
+* **longrunning:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+
+## [0.5.12](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.11...longrunning/v0.5.12) (2024-08-08)
+
+
+### Bug Fixes
+
+* **longrunning:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+## [0.5.11](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.10...longrunning/v0.5.11) (2024-07-24)
+
+
+### Bug Fixes
+
+* **longrunning:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.5.10](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.9...longrunning/v0.5.10) (2024-07-10)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [0.5.9](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.8...longrunning/v0.5.9) (2024-07-01)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+
+## [0.5.8](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.7...longrunning/v0.5.8) (2024-06-26)
+
+
+### Bug Fixes
+
+* **longrunning:** Enable new auth lib ([b95805f](https://github.com/googleapis/google-cloud-go/commit/b95805f4c87d3e8d10ea23bd7a2d68d7a4157568))
+
+## [0.5.7](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.6...longrunning/v0.5.7) (2024-05-01)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
+## [0.5.6](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.5...longrunning/v0.5.6) (2024-03-14)
+
+
+### Bug Fixes
+
+* **longrunning:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [0.5.5](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.4...longrunning/v0.5.5) (2024-01-30)
+
+
+### Bug Fixes
+
+* **longrunning:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
+
+## [0.5.4](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.3...longrunning/v0.5.4) (2023-11-01)
+
+
+### Bug Fixes
+
+* **longrunning:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
+
+## [0.5.3](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.2...longrunning/v0.5.3) (2023-10-26)
+
+
+### Bug Fixes
+
+* **longrunning:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.1...longrunning/v0.5.2) (2023-10-12)
+
+
+### Bug Fixes
+
+* **longrunning:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.0...longrunning/v0.5.1) (2023-06-20)
+
+
+### Bug Fixes
+
+* **longrunning:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.2...longrunning/v0.5.0) (2023-05-30)
+
+
+### Features
+
+* **longrunning:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6))
+
+## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.1...longrunning/v0.4.2) (2023-05-08)
+
+
+### Bug Fixes
+
+* **longrunning:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef))
+
+## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.0...longrunning/v0.4.1) (2023-02-14)
+
+
+### Bug Fixes
+
+* **longrunning:** Properly parse errors with apierror ([#7392](https://github.com/googleapis/google-cloud-go/issues/7392)) ([e768e48](https://github.com/googleapis/google-cloud-go/commit/e768e487e10b197ba42a2339014136d066190610))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.3.0...longrunning/v0.4.0) (2023-01-04)
+
+
+### Features
+
+* **longrunning:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
+
+## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03)
+
+
+### Features
+
+* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
+
+## v0.1.0
+
+Initial release.
diff --git a/vendor/cloud.google.com/go/longrunning/LICENSE b/vendor/cloud.google.com/go/longrunning/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/longrunning/README.md b/vendor/cloud.google.com/go/longrunning/README.md
new file mode 100644
index 000000000..a07f3093f
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/README.md
@@ -0,0 +1,26 @@
+# longrunning
+
+[](https://pkg.go.dev/cloud.google.com/go/longrunning)
+
+A helper library for working with long running operations.
+
+## Install
+
+```bash
+go get cloud.google.com/go/longrunning
+```
+
+## Go Version Support
+
+See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
+section in the root directory's README.
+
+## Contributing
+
+Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms. See
+[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go
new file mode 100644
index 000000000..966f09911
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go
@@ -0,0 +1,69 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package longrunning
+
+import (
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ "google.golang.org/api/iterator"
+)
+
+// OperationIterator manages a stream of *longrunningpb.Operation.
+type OperationIterator struct {
+ items []*longrunningpb.Operation
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *OperationIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
+ var item *longrunningpb.Operation
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *OperationIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *OperationIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go
new file mode 100644
index 000000000..aac4cd795
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/auxiliary_go123.go
@@ -0,0 +1,32 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package longrunning
+
+import (
+ "iter"
+
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *OperationIterator) All() iter.Seq2[*longrunningpb.Operation, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
new file mode 100644
index 000000000..11436b7c4
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go
@@ -0,0 +1,79 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+// Package longrunning is an auto-generated package for the
+// Long Running Operations API.
+//
+// # General documentation
+//
+// For information that is relevant for all client libraries please reference
+// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
+// page includes:
+//
+// - [Authentication and Authorization]
+// - [Timeouts and Cancellation]
+// - [Testing against Client Libraries]
+// - [Debugging Client Libraries]
+// - [Inspecting errors]
+//
+// # Example usage
+//
+// To get started with this package, create a client.
+//
+// // go get cloud.google.com/go/longrunning/autogen@latest
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := longrunning.NewOperationsClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// The client will use your default application credentials. Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+// The returned client must be Closed when it is done being used.
+//
+// # Using the Client
+//
+// The following is an example of making an API call with the newly created client, mentioned above.
+//
+// req := &longrunningpb.CancelOperationRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#CancelOperationRequest.
+// }
+// err = c.CancelOperation(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
+// }
+//
+// # Use of Context
+//
+// The ctx passed to NewOperationsClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
+// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
+// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
+// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
+// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
+package longrunning // import "cloud.google.com/go/longrunning/autogen"
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
new file mode 100644
index 000000000..f09714b9b
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
@@ -0,0 +1,30 @@
+// Copyright 2020, Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package longrunning
+
+import (
+ "context"
+
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+)
+
+// InternalFromConn is for use by the Google Cloud Libraries only.
+//
+// Deprecated. Use `NewOperationsClient(ctx, option.WithGRPCConn(conn))` instead.
+func InternalFromConn(conn *grpc.ClientConn) *OperationsClient {
+ c, _ := NewOperationsClient(context.Background(), option.WithGRPCConn(conn))
+ return c
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json
new file mode 100644
index 000000000..527142821
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json
@@ -0,0 +1,73 @@
+{
+ "schema": "1.0",
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
+ "language": "go",
+ "protoPackage": "google.longrunning",
+ "libraryPackage": "cloud.google.com/go/longrunning/autogen",
+ "services": {
+ "Operations": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "OperationsClient",
+ "rpcs": {
+ "CancelOperation": {
+ "methods": [
+ "CancelOperation"
+ ]
+ },
+ "DeleteOperation": {
+ "methods": [
+ "DeleteOperation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListOperations": {
+ "methods": [
+ "ListOperations"
+ ]
+ },
+ "WaitOperation": {
+ "methods": [
+ "WaitOperation"
+ ]
+ }
+ }
+ },
+ "rest": {
+ "libraryClient": "OperationsClient",
+ "rpcs": {
+ "CancelOperation": {
+ "methods": [
+ "CancelOperation"
+ ]
+ },
+ "DeleteOperation": {
+ "methods": [
+ "DeleteOperation"
+ ]
+ },
+ "GetOperation": {
+ "methods": [
+ "GetOperation"
+ ]
+ },
+ "ListOperations": {
+ "methods": [
+ "ListOperations"
+ ]
+ },
+ "WaitOperation": {
+ "methods": [
+ "WaitOperation"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/helpers.go b/vendor/cloud.google.com/go/longrunning/autogen/helpers.go
new file mode 100644
index 000000000..4ff8ce8b5
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/helpers.go
@@ -0,0 +1,99 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package longrunning
+
+import (
+ "context"
+ "io"
+ "log/slog"
+ "net/http"
+
+ "github.com/googleapis/gax-go/v2/internallog"
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "longrunning.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{}
+}
+
+func executeHTTPRequestWithResponse(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, *http.Response, error) {
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body))
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+ buf, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, nil, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, buf))
+ if err = googleapi.CheckResponseWithBody(resp, buf); err != nil {
+ return nil, nil, err
+ }
+ return buf, resp, nil
+}
+
+func executeHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) ([]byte, error) {
+ buf, _, err := executeHTTPRequestWithResponse(ctx, client, req, logger, body, rpc)
+ return buf, err
+}
+
+func executeStreamingHTTPRequest(ctx context.Context, client *http.Client, req *http.Request, logger *slog.Logger, body []byte, rpc string) (*http.Response, error) {
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", internallog.HTTPRequest(req, body))
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", internallog.HTTPResponse(resp, nil))
+ if err = googleapi.CheckResponse(resp); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/info.go b/vendor/cloud.google.com/go/longrunning/autogen/info.go
new file mode 100644
index 000000000..b006c4d01
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/info.go
@@ -0,0 +1,24 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package longrunning
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Also passes any
+// provided key-value pairs. Intended for use by Google-written clients.
+//
+// Internal use only.
+func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) {
+ c.setGoogleClientInfo(keyval...)
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go
new file mode 100644
index 000000000..d1a53c679
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go
@@ -0,0 +1,1109 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/longrunning/operations.proto
+
+package longrunningpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status1 "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This resource represents a long-running operation that is the result of a
+// network API call.
+type Operation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The server-assigned name, which is only unique within the same service that
+ // originally returns it. If you use the default HTTP mapping, the
+ // `name` should be a resource name ending with `operations/{unique_id}`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Service-specific metadata associated with the operation. It typically
+ // contains progress information and common metadata such as create time.
+ // Some services might not provide such metadata. Any method that returns a
+ // long-running operation should document the metadata type, if any.
+ Metadata *anypb.Any `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // If the value is `false`, it means the operation is still in progress.
+ // If `true`, the operation is completed, and either `error` or `response` is
+ // available.
+ Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ // The operation result, which can be either an `error` or a valid `response`.
+ // If `done` == `false`, neither `error` nor `response` is set.
+ // If `done` == `true`, exactly one of `error` or `response` can be set.
+ // Some services might not provide the result.
+ //
+ // Types that are assignable to Result:
+ //
+ // *Operation_Error
+ // *Operation_Response
+ Result isOperation_Result `protobuf_oneof:"result"`
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Operation.ProtoReflect.Descriptor instead.
+func (*Operation) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Operation) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Operation) GetMetadata() *anypb.Any {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *Operation) GetDone() bool {
+ if x != nil {
+ return x.Done
+ }
+ return false
+}
+
+func (m *Operation) GetResult() isOperation_Result {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (x *Operation) GetError() *status.Status {
+ if x, ok := x.GetResult().(*Operation_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Operation) GetResponse() *anypb.Any {
+ if x, ok := x.GetResult().(*Operation_Response); ok {
+ return x.Response
+ }
+ return nil
+}
+
+type isOperation_Result interface {
+ isOperation_Result()
+}
+
+type Operation_Error struct {
+ // The error result of the operation in case of failure or cancellation.
+ Error *status.Status `protobuf:"bytes,4,opt,name=error,proto3,oneof"`
+}
+
+type Operation_Response struct {
+ // The normal, successful response of the operation. If the original
+ // method returns no data on success, such as `Delete`, the response is
+ // `google.protobuf.Empty`. If the original method is standard
+ // `Get`/`Create`/`Update`, the response should be the resource. For other
+ // methods, the response should have the type `XxxResponse`, where `Xxx`
+ // is the original method name. For example, if the original method name
+ // is `TakeSnapshot()`, the inferred response type is
+ // `TakeSnapshotResponse`.
+ Response *anypb.Any `protobuf:"bytes,5,opt,name=response,proto3,oneof"`
+}
+
+func (*Operation_Error) isOperation_Result() {}
+
+func (*Operation_Response) isOperation_Result() {}
+
+// The request message for
+// [Operations.GetOperation][google.longrunning.Operations.GetOperation].
+type GetOperationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the operation resource.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetOperationRequest) Reset() {
+ *x = GetOperationRequest{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetOperationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetOperationRequest) ProtoMessage() {}
+
+func (x *GetOperationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetOperationRequest.ProtoReflect.Descriptor instead.
+func (*GetOperationRequest) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetOperationRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The request message for
+// [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+type ListOperationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the operation's parent resource.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // The standard list filter.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // The standard list page size.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // The standard list page token.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListOperationsRequest) Reset() {
+ *x = ListOperationsRequest{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListOperationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListOperationsRequest) ProtoMessage() {}
+
+func (x *ListOperationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListOperationsRequest.ProtoReflect.Descriptor instead.
+func (*ListOperationsRequest) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListOperationsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListOperationsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListOperationsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListOperationsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The response message for
+// [Operations.ListOperations][google.longrunning.Operations.ListOperations].
+type ListOperationsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of operations that matches the specified filter in the request.
+ Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
+ // The standard List next-page token.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListOperationsResponse) Reset() {
+ *x = ListOperationsResponse{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListOperationsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListOperationsResponse) ProtoMessage() {}
+
+func (x *ListOperationsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListOperationsResponse.ProtoReflect.Descriptor instead.
+func (*ListOperationsResponse) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListOperationsResponse) GetOperations() []*Operation {
+ if x != nil {
+ return x.Operations
+ }
+ return nil
+}
+
+func (x *ListOperationsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The request message for
+// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
+type CancelOperationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the operation resource to be cancelled.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *CancelOperationRequest) Reset() {
+ *x = CancelOperationRequest{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CancelOperationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CancelOperationRequest) ProtoMessage() {}
+
+func (x *CancelOperationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CancelOperationRequest.ProtoReflect.Descriptor instead.
+func (*CancelOperationRequest) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *CancelOperationRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The request message for
+// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
+type DeleteOperationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the operation resource to be deleted.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteOperationRequest) Reset() {
+ *x = DeleteOperationRequest{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteOperationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteOperationRequest) ProtoMessage() {}
+
+func (x *DeleteOperationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteOperationRequest.ProtoReflect.Descriptor instead.
+func (*DeleteOperationRequest) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteOperationRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The request message for
+// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation].
+type WaitOperationRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the operation resource to wait on.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum duration to wait before timing out. If left blank, the wait
+ // will be at most the time permitted by the underlying HTTP/RPC protocol.
+ // If RPC context deadline is also specified, the shorter one will be used.
+ Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *WaitOperationRequest) Reset() {
+ *x = WaitOperationRequest{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WaitOperationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WaitOperationRequest) ProtoMessage() {}
+
+func (x *WaitOperationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WaitOperationRequest.ProtoReflect.Descriptor instead.
+func (*WaitOperationRequest) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *WaitOperationRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+// A message representing the message types used by a long-running operation.
+//
+// Example:
+//
+// rpc Export(ExportRequest) returns (google.longrunning.Operation) {
+// option (google.longrunning.operation_info) = {
+// response_type: "ExportResponse"
+// metadata_type: "ExportMetadata"
+// };
+// }
+type OperationInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The message name of the primary return type for this
+ // long-running operation.
+ // This type will be used to deserialize the LRO's response.
+ //
+ // If the response is in a different package from the rpc, a fully-qualified
+ // message name must be used (e.g. `google.protobuf.Struct`).
+ //
+ // Note: Altering this value constitutes a breaking change.
+ ResponseType string `protobuf:"bytes,1,opt,name=response_type,json=responseType,proto3" json:"response_type,omitempty"`
+ // Required. The message name of the metadata type for this long-running
+ // operation.
+ //
+ // If the response is in a different package from the rpc, a fully-qualified
+ // message name must be used (e.g. `google.protobuf.Struct`).
+ //
+ // Note: Altering this value constitutes a breaking change.
+ MetadataType string `protobuf:"bytes,2,opt,name=metadata_type,json=metadataType,proto3" json:"metadata_type,omitempty"`
+}
+
+func (x *OperationInfo) Reset() {
+ *x = OperationInfo{}
+ mi := &file_google_longrunning_operations_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *OperationInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OperationInfo) ProtoMessage() {}
+
+func (x *OperationInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_longrunning_operations_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OperationInfo.ProtoReflect.Descriptor instead.
+func (*OperationInfo) Descriptor() ([]byte, []int) {
+ return file_google_longrunning_operations_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *OperationInfo) GetResponseType() string {
+ if x != nil {
+ return x.ResponseType
+ }
+ return ""
+}
+
+func (x *OperationInfo) GetMetadataType() string {
+ if x != nil {
+ return x.MetadataType
+ }
+ return ""
+}
+
+var file_google_longrunning_operations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*OperationInfo)(nil),
+ Field: 1049,
+ Name: "google.longrunning.operation_info",
+ Tag: "bytes,1049,opt,name=operation_info",
+ Filename: "google/longrunning/operations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // Additional information regarding long-running operations.
+ // In particular, this specifies the types that are returned from
+ // long-running operations.
+ //
+ // Required for methods that return `google.longrunning.Operation`; invalid
+ // otherwise.
+ //
+ // optional google.longrunning.OperationInfo operation_info = 1049;
+ E_OperationInfo = &file_google_longrunning_operations_proto_extTypes[0]
+)
+
+var File_google_longrunning_operations_proto protoreflect.FileDescriptor
+
+var file_google_longrunning_operations_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
+ 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00,
+ 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48,
+ 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0x7f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0x7f, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
+ 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0x2c, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x5f,
+ 0x0a, 0x14, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22,
+ 0x59, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
+ 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x32, 0xaa, 0x05, 0x0a, 0x0a, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d,
+ 0x12, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a,
+ 0x7d, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
+ 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a,
+ 0x7d, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c,
+ 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65,
+ 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x5a, 0x0a, 0x0d,
+ 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x1a, 0x1d, 0xca, 0x41, 0x1a, 0x6c, 0x6f, 0x6e,
+ 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x3a, 0x69, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68,
+ 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
+ 0x66, 0x6f, 0x42, 0xa5, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x0f, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x43, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x67, 0x65, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e,
+ 0x69, 0x6e, 0x67, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x05, 0x47, 0x4c, 0x52, 0x55, 0x4e,
+ 0xaa, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x4c,
+ 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_google_longrunning_operations_proto_rawDescOnce sync.Once
+ file_google_longrunning_operations_proto_rawDescData = file_google_longrunning_operations_proto_rawDesc
+)
+
+func file_google_longrunning_operations_proto_rawDescGZIP() []byte {
+ file_google_longrunning_operations_proto_rawDescOnce.Do(func() {
+ file_google_longrunning_operations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_longrunning_operations_proto_rawDescData)
+ })
+ return file_google_longrunning_operations_proto_rawDescData
+}
+
+var file_google_longrunning_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_google_longrunning_operations_proto_goTypes = []any{
+ (*Operation)(nil), // 0: google.longrunning.Operation
+ (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest
+ (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest
+ (*ListOperationsResponse)(nil), // 3: google.longrunning.ListOperationsResponse
+ (*CancelOperationRequest)(nil), // 4: google.longrunning.CancelOperationRequest
+ (*DeleteOperationRequest)(nil), // 5: google.longrunning.DeleteOperationRequest
+ (*WaitOperationRequest)(nil), // 6: google.longrunning.WaitOperationRequest
+ (*OperationInfo)(nil), // 7: google.longrunning.OperationInfo
+ (*anypb.Any)(nil), // 8: google.protobuf.Any
+ (*status.Status)(nil), // 9: google.rpc.Status
+ (*durationpb.Duration)(nil), // 10: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 11: google.protobuf.MethodOptions
+ (*emptypb.Empty)(nil), // 12: google.protobuf.Empty
+}
+var file_google_longrunning_operations_proto_depIdxs = []int32{
+ 8, // 0: google.longrunning.Operation.metadata:type_name -> google.protobuf.Any
+ 9, // 1: google.longrunning.Operation.error:type_name -> google.rpc.Status
+ 8, // 2: google.longrunning.Operation.response:type_name -> google.protobuf.Any
+ 0, // 3: google.longrunning.ListOperationsResponse.operations:type_name -> google.longrunning.Operation
+ 10, // 4: google.longrunning.WaitOperationRequest.timeout:type_name -> google.protobuf.Duration
+ 11, // 5: google.longrunning.operation_info:extendee -> google.protobuf.MethodOptions
+ 7, // 6: google.longrunning.operation_info:type_name -> google.longrunning.OperationInfo
+ 2, // 7: google.longrunning.Operations.ListOperations:input_type -> google.longrunning.ListOperationsRequest
+ 1, // 8: google.longrunning.Operations.GetOperation:input_type -> google.longrunning.GetOperationRequest
+ 5, // 9: google.longrunning.Operations.DeleteOperation:input_type -> google.longrunning.DeleteOperationRequest
+ 4, // 10: google.longrunning.Operations.CancelOperation:input_type -> google.longrunning.CancelOperationRequest
+ 6, // 11: google.longrunning.Operations.WaitOperation:input_type -> google.longrunning.WaitOperationRequest
+ 3, // 12: google.longrunning.Operations.ListOperations:output_type -> google.longrunning.ListOperationsResponse
+ 0, // 13: google.longrunning.Operations.GetOperation:output_type -> google.longrunning.Operation
+ 12, // 14: google.longrunning.Operations.DeleteOperation:output_type -> google.protobuf.Empty
+ 12, // 15: google.longrunning.Operations.CancelOperation:output_type -> google.protobuf.Empty
+ 0, // 16: google.longrunning.Operations.WaitOperation:output_type -> google.longrunning.Operation
+ 12, // [12:17] is the sub-list for method output_type
+ 7, // [7:12] is the sub-list for method input_type
+ 6, // [6:7] is the sub-list for extension type_name
+ 5, // [5:6] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_google_longrunning_operations_proto_init() }
+func file_google_longrunning_operations_proto_init() {
+ if File_google_longrunning_operations_proto != nil {
+ return
+ }
+ file_google_longrunning_operations_proto_msgTypes[0].OneofWrappers = []any{
+ (*Operation_Error)(nil),
+ (*Operation_Response)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_longrunning_operations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 1,
+ NumServices: 1,
+ },
+ GoTypes: file_google_longrunning_operations_proto_goTypes,
+ DependencyIndexes: file_google_longrunning_operations_proto_depIdxs,
+ MessageInfos: file_google_longrunning_operations_proto_msgTypes,
+ ExtensionInfos: file_google_longrunning_operations_proto_extTypes,
+ }.Build()
+ File_google_longrunning_operations_proto = out.File
+ file_google_longrunning_operations_proto_rawDesc = nil
+ file_google_longrunning_operations_proto_goTypes = nil
+ file_google_longrunning_operations_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// OperationsClient is the client API for Operations service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type OperationsClient interface {
+ // Lists operations that match the specified filter in the request. If the
+ // server doesn't support this method, it returns `UNIMPLEMENTED`.
+ ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error)
+ // Gets the latest state of a long-running operation. Clients can use this
+ // method to poll the operation result at intervals as recommended by the API
+ // service.
+ GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Deletes a long-running operation. This method indicates that the client is
+ // no longer interested in the operation result. It does not cancel the
+ // operation. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not
+ // guaranteed. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of `1`, corresponding to
+ // `Code.CANCELLED`.
+ CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Waits until the specified long-running operation is done or reaches at most
+ // a specified timeout, returning the latest state. If the operation is
+ // already done, the latest state is immediately returned. If the timeout
+ // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
+ // timeout is used. If the server does not support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ // Note that this method is on a best-effort basis. It may return the latest
+ // state before the specified timeout (including immediately), meaning even an
+ // immediate response is no guarantee that the operation is done.
+ WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error)
+}
+
+type operationsClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient {
+ return &operationsClient{cc}
+}
+
+func (c *operationsClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) {
+ out := new(ListOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.longrunning.Operations/ListOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.longrunning.Operations/GetOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *operationsClient) WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.longrunning.Operations/WaitOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// OperationsServer is the server API for Operations service.
+type OperationsServer interface {
+ // Lists operations that match the specified filter in the request. If the
+ // server doesn't support this method, it returns `UNIMPLEMENTED`.
+ ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error)
+ // Gets the latest state of a long-running operation. Clients can use this
+ // method to poll the operation result at intervals as recommended by the API
+ // service.
+ GetOperation(context.Context, *GetOperationRequest) (*Operation, error)
+ // Deletes a long-running operation. This method indicates that the client is
+ // no longer interested in the operation result. It does not cancel the
+ // operation. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error)
+ // Starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not
+ // guaranteed. If the server doesn't support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of `1`, corresponding to
+ // `Code.CANCELLED`.
+ CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error)
+ // Waits until the specified long-running operation is done or reaches at most
+ // a specified timeout, returning the latest state. If the operation is
+ // already done, the latest state is immediately returned. If the timeout
+ // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
+ // timeout is used. If the server does not support this method, it returns
+ // `google.rpc.Code.UNIMPLEMENTED`.
+ // Note that this method is on a best-effort basis. It may return the latest
+ // state before the specified timeout (including immediately), meaning even an
+ // immediate response is no guarantee that the operation is done.
+ WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error)
+}
+
+// UnimplementedOperationsServer can be embedded to have forward compatible implementations.
+type UnimplementedOperationsServer struct {
+}
+
+func (*UnimplementedOperationsServer) ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListOperations not implemented")
+}
+func (*UnimplementedOperationsServer) GetOperation(context.Context, *GetOperationRequest) (*Operation, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method GetOperation not implemented")
+}
+func (*UnimplementedOperationsServer) DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteOperation not implemented")
+}
+func (*UnimplementedOperationsServer) CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CancelOperation not implemented")
+}
+func (*UnimplementedOperationsServer) WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method WaitOperation not implemented")
+}
+
+func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) {
+ s.RegisterService(&_Operations_serviceDesc, srv)
+}
+
+func _Operations_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(OperationsServer).ListOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.longrunning.Operations/ListOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(OperationsServer).ListOperations(ctx, req.(*ListOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Operations_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(OperationsServer).GetOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.longrunning.Operations/GetOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(OperationsServer).GetOperation(ctx, req.(*GetOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Operations_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(OperationsServer).DeleteOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.longrunning.Operations/DeleteOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(OperationsServer).DeleteOperation(ctx, req.(*DeleteOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Operations_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CancelOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(OperationsServer).CancelOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.longrunning.Operations/CancelOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(OperationsServer).CancelOperation(ctx, req.(*CancelOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Operations_WaitOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(WaitOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(OperationsServer).WaitOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.longrunning.Operations/WaitOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(OperationsServer).WaitOperation(ctx, req.(*WaitOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Operations_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.longrunning.Operations",
+ HandlerType: (*OperationsServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListOperations",
+ Handler: _Operations_ListOperations_Handler,
+ },
+ {
+ MethodName: "GetOperation",
+ Handler: _Operations_GetOperation_Handler,
+ },
+ {
+ MethodName: "DeleteOperation",
+ Handler: _Operations_DeleteOperation_Handler,
+ },
+ {
+ MethodName: "CancelOperation",
+ Handler: _Operations_CancelOperation_Handler,
+ },
+ {
+ MethodName: "WaitOperation",
+ Handler: _Operations_WaitOperation_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/longrunning/operations.proto",
+}
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
new file mode 100644
index 000000000..57c8173e9
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
@@ -0,0 +1,821 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package longrunning
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/http"
+ "net/url"
+ "time"
+
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ httptransport "google.golang.org/api/transport/http"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+var newOperationsClientHook clientHook
+
+// OperationsCallOptions contains the retry settings for each method of OperationsClient.
+type OperationsCallOptions struct {
+ ListOperations []gax.CallOption
+ GetOperation []gax.CallOption
+ DeleteOperation []gax.CallOption
+ CancelOperation []gax.CallOption
+ WaitOperation []gax.CallOption
+}
+
+func defaultOperationsGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("longrunning.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultOperationsCallOptions() *OperationsCallOptions {
+ return &OperationsCallOptions{
+ ListOperations: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ })
+ }),
+ },
+ GetOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ })
+ }),
+ },
+ DeleteOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ })
+ }),
+ },
+ CancelOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ })
+ }),
+ },
+ WaitOperation: []gax.CallOption{},
+ }
+}
+
+func defaultOperationsRESTCallOptions() *OperationsCallOptions {
+ return &OperationsCallOptions{
+ ListOperations: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ },
+ http.StatusServiceUnavailable)
+ }),
+ },
+ GetOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ },
+ http.StatusServiceUnavailable)
+ }),
+ },
+ DeleteOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ },
+ http.StatusServiceUnavailable)
+ }),
+ },
+ CancelOperation: []gax.CallOption{
+ gax.WithTimeout(10000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 500 * time.Millisecond,
+ Max: 10000 * time.Millisecond,
+ Multiplier: 2.00,
+ },
+ http.StatusServiceUnavailable)
+ }),
+ },
+ WaitOperation: []gax.CallOption{},
+ }
+}
+
+// internalOperationsClient is an interface that defines the methods available from Long Running Operations API.
+type internalOperationsClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
+ DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error
+ CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error
+ WaitOperation(context.Context, *longrunningpb.WaitOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
+}
+
+// OperationsClient is a client for interacting with Long Running Operations API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes long time to complete, it can be designed
+// to return Operation to the client, and the
+// client can use this interface to receive the real response asynchronously by
+// polling the operation resource, or pass the operation resource to another API
+// (such as Pub/Sub API) to receive the response. Any API service that returns
+// long-running operations should implement the Operations interface so
+// developers can have a consistent client experience.
+type OperationsClient struct {
+ // The internal transport-dependent client.
+ internalClient internalOperationsClient
+
+ // The call options for this service.
+ CallOptions *OperationsCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *OperationsClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *OperationsClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *OperationsClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListOperations lists operations that match the specified filter in the request. If the
+// server doesn’t support this method, it returns UNIMPLEMENTED.
+func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ return c.internalClient.ListOperations(ctx, req, opts...)
+}
+
+// GetOperation gets the latest state of a long-running operation. Clients can use this
+// method to poll the operation result at intervals as recommended by the API
+// service.
+func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
+// DeleteOperation deletes a long-running operation. This method indicates that the client is
+// no longer interested in the operation result. It does not cancel the
+// operation. If the server doesn’t support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED.
+func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteOperation(ctx, req, opts...)
+}
+
+// CancelOperation starts asynchronous cancellation on a long-running operation. The server
+// makes a best effort to cancel the operation, but success is not
+// guaranteed. If the server doesn’t support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED. Clients can use
+// Operations.GetOperation or
+// other methods to check whether the cancellation succeeded or whether the
+// operation completed despite cancellation. On successful cancellation,
+// the operation is not deleted; instead, it becomes an operation with
+// an Operation.error value with a
+// google.rpc.Status.code of 1, corresponding to
+// Code.CANCELLED.
+func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CancelOperation(ctx, req, opts...)
+}
+
+// WaitOperation waits until the specified long-running operation is done or reaches at most
+// a specified timeout, returning the latest state. If the operation is
+// already done, the latest state is immediately returned. If the timeout
+// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
+// timeout is used. If the server does not support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED.
+// Note that this method is on a best-effort basis. It may return the latest
+// state before the specified timeout (including immediately), meaning even an
+// immediate response is no guarantee that the operation is done.
+func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.WaitOperation(ctx, req, opts...)
+}
+
+// operationsGRPCClient is a client for interacting with Long Running Operations API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type operationsGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing OperationsClient
+ CallOptions **OperationsCallOptions
+
+ // The gRPC API client.
+ operationsClient longrunningpb.OperationsClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewOperationsClient creates a new operations client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes long time to complete, it can be designed
+// to return Operation to the client, and the
+// client can use this interface to receive the real response asynchronously by
+// polling the operation resource, or pass the operation resource to another API
+// (such as Pub/Sub API) to receive the response. Any API service that returns
+// long-running operations should implement the Operations interface so
+// developers can have a consistent client experience.
+func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
+ clientOpts := defaultOperationsGRPCClientOptions()
+ if newOperationsClientHook != nil {
+ hookOpts, err := newOperationsClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := OperationsClient{CallOptions: defaultOperationsCallOptions()}
+
+ c := &operationsGRPCClient{
+ connPool: connPool,
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *operationsGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *operationsGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type operationsRESTClient struct {
+ // The http endpoint to connect to.
+ endpoint string
+
+ // The http client.
+ httpClient *http.Client
+
+ // The x-goog-* headers to be sent with each request.
+ xGoogHeaders []string
+
+ // Points back to the CallOptions field of the containing OperationsClient
+ CallOptions **OperationsCallOptions
+
+ logger *slog.Logger
+}
+
+// NewOperationsRESTClient creates a new operations rest client.
+//
+// Manages long-running operations with an API service.
+//
+// When an API method normally takes long time to complete, it can be designed
+// to return Operation to the client, and the
+// client can use this interface to receive the real response asynchronously by
+// polling the operation resource, or pass the operation resource to another API
+// (such as Pub/Sub API) to receive the response. Any API service that returns
+// long-running operations should implement the Operations interface so
+// developers can have a consistent client experience.
+func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
+ clientOpts := append(defaultOperationsRESTClientOptions(), opts...)
+ httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ callOpts := defaultOperationsRESTCallOptions()
+ c := &operationsRESTClient{
+ endpoint: endpoint,
+ httpClient: httpClient,
+ CallOptions: &callOpts,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil
+}
+
+func defaultOperationsRESTClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://longrunning.UNIVERSE_DOMAIN"),
+ internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableNewAuthLibrary(),
+ }
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *operationsRESTClient) Close() error {
+ // Replace httpClient with nil to force cleanup.
+ c.httpClient = nil
+ return nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: This method always returns nil.
+func (c *operationsRESTClient) Connection() *grpc.ClientConn {
+ return nil
+}
+func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...)
+ it := &OperationIterator{}
+ req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+ resp := &longrunningpb.ListOperationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.ListOperations, req, settings.GRPC, c.logger, "ListOperations")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.GetOperation, req, settings.GRPC, c.logger, "GetOperation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.operationsClient.DeleteOperation, req, settings.GRPC, c.logger, "DeleteOperation")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.operationsClient.CancelOperation, req, settings.GRPC, c.logger, "CancelOperation")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
+ opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.operationsClient.WaitOperation, req, settings.GRPC, c.logger, "WaitOperation")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListOperations lists operations that match the specified filter in the request. If the
+// server doesn’t support this method, it returns UNIMPLEMENTED.
+func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ it := &OperationIterator{}
+ req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+ resp := &longrunningpb.ListOperationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "ListOperations")
+ if err != nil {
+ return err
+ }
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetOperation gets the latest state of a long-running operation. Clients can use this
+// method to poll the operation result at intervals as recommended by the API
+// service.
+func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "GetOperation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// DeleteOperation deletes a long-running operation. This method indicates that the client is
+// no longer interested in the operation result. It does not cancel the
+// operation. If the server doesn’t support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED.
+func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "DeleteOperation")
+ return err
+ }, opts...)
+}
+
+// CancelOperation starts asynchronous cancellation on a long-running operation. The server
+// makes a best effort to cancel the operation, but success is not
+// guaranteed. If the server doesn’t support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED. Clients can use
+// Operations.GetOperation or
+// other methods to check whether the cancellation succeeded or whether the
+// operation completed despite cancellation. On successful cancellation,
+// the operation is not deleted; instead, it becomes an operation with
+// an Operation.error value with a
+// google.rpc.Status.code of 1, corresponding to
+// Code.CANCELLED.
+func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName())
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ _, err = executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, jsonReq, "CancelOperation")
+ return err
+ }, opts...)
+}
+
+// WaitOperation waits until the specified long-running operation is done or reaches at most
+// a specified timeout, returning the latest state. If the operation is
+// already done, the latest state is immediately returned. If the timeout
+// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
+// timeout is used. If the server does not support this method, it returns
+// google.rpc.Code.UNIMPLEMENTED.
+// Note that this method is on a best-effort basis. It may return the latest
+// state before the specified timeout (including immediately), meaning even an
+// immediate response is no guarantee that the operation is done.
+func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("")
+
+ params := url.Values{}
+ if req.GetName() != "" {
+ params.Add("name", fmt.Sprintf("%v", req.GetName()))
+ }
+ if req.GetTimeout() != nil {
+ field, err := protojson.Marshal(req.GetTimeout())
+ if err != nil {
+ return nil, err
+ }
+ params.Add("timeout", string(field[1:len(field)-1]))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ buf, err := executeHTTPRequest(ctx, c.httpClient, httpReq, c.logger, nil, "WaitOperation")
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go
new file mode 100644
index 000000000..3c75b761e
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/longrunning.go
@@ -0,0 +1,182 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
+// See google.golang.org/genproto/googleapis/longrunning for its service definition.
+//
+// Users of the Google Cloud Libraries will typically not use this package directly.
+// Instead they will call functions returning Operations and call their methods.
+//
+// This package is still experimental and subject to change.
+package longrunning // import "cloud.google.com/go/longrunning"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ autogen "cloud.google.com/go/longrunning/autogen"
+ pb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "github.com/googleapis/gax-go/v2/apierror"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
+var ErrNoMetadata = errors.New("operation contains no metadata")
+
+// Operation represents the result of an API call that may not be ready yet.
+type Operation struct {
+ c operationsClient
+ proto *pb.Operation
+}
+
+type operationsClient interface {
+ GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
+ CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
+ DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
+}
+
+// InternalNewOperation is for use by the google Cloud Libraries only.
+//
+// InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation.
+// The conn parameter refers to a server that proto was received from.
+func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
+ return &Operation{
+ c: inner,
+ proto: proto,
+ }
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service
+// from which the operation is created.
+func (op *Operation) Name() string {
+ return op.proto.Name
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *Operation) Done() bool {
+ return op.proto.Done
+}
+
+// Metadata unmarshals op's metadata into meta.
+// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
+func (op *Operation) Metadata(meta protoadapt.MessageV1) error {
+ if m := op.proto.Metadata; m != nil {
+ metav2 := protoadapt.MessageV2Of(meta)
+ return anypb.UnmarshalTo(m, metav2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
+ }
+ return ErrNoMetadata
+}
+
+// Poll fetches the latest state of a long-running operation.
+//
+// If Poll fails, the error is returned and op is unmodified.
+// If Poll succeeds and the operation has completed with failure,
+// the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true; if resp != nil, the response of the operation
+// is stored in resp.
+func (op *Operation) Poll(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
+ if !op.Done() {
+ p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...)
+ if err != nil {
+ return err
+ }
+ op.proto = p
+ }
+ if !op.Done() {
+ return nil
+ }
+
+ switch r := op.proto.Result.(type) {
+ case *pb.Operation_Error:
+ err, _ := apierror.FromError(status.ErrorProto(r.Error))
+ return err
+ case *pb.Operation_Response:
+ if resp == nil {
+ return nil
+ }
+ respv2 := protoadapt.MessageV2Of(resp)
+ return anypb.UnmarshalTo(r.Response, respv2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
+ default:
+ return fmt.Errorf("unsupported result type %[1]T: %[1]v", r)
+ }
+}
+
+// DefaultWaitInterval is the polling interval used by Operation.Wait.
+const DefaultWaitInterval = 60 * time.Second
+
+// Wait is equivalent to WaitWithInterval using DefaultWaitInterval.
+func (op *Operation) Wait(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
+ return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...)
+}
+
+// WaitWithInterval blocks until the operation is completed.
+// If resp != nil, Wait stores the response in resp.
+// WaitWithInterval polls every interval, except initially
+// when it polls using exponential backoff.
+//
+// See documentation of Poll for error-handling information.
+func (op *Operation) WaitWithInterval(ctx context.Context, resp protoadapt.MessageV1, interval time.Duration, opts ...gax.CallOption) error {
+ bo := gax.Backoff{
+ Initial: 1 * time.Second,
+ Max: interval,
+ }
+ if bo.Max < bo.Initial {
+ bo.Max = bo.Initial
+ }
+ return op.wait(ctx, resp, &bo, gax.Sleep, opts...)
+}
+
+type sleeper func(context.Context, time.Duration) error
+
+// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing.
+func (op *Operation) wait(ctx context.Context, resp protoadapt.MessageV1, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error {
+ for {
+ if err := op.Poll(ctx, resp, opts...); err != nil {
+ return err
+ }
+ if op.Done() {
+ return nil
+ }
+ if err := sl(ctx, bo.Pause()); err != nil {
+ return err
+ }
+ }
+}
+
+// Cancel starts asynchronous cancellation on a long-running operation. The server
+// makes a best effort to cancel the operation, but success is not
+// guaranteed. If the server doesn't support this method, it returns
+// status.Code(err) == codes.Unimplemented. Clients can use
+// Poll or other methods to check whether the cancellation succeeded or whether the
+// operation completed despite cancellation. On successful cancellation,
+// the operation is not deleted; instead, op.Poll returns an error
+// with code Canceled.
+func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error {
+ return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...)
+}
+
+// Delete deletes a long-running operation. This method indicates that the client is
+// no longer interested in the operation result. It does not cancel the
+// operation. If the server doesn't support this method, status.Code(err) == codes.Unimplemented.
+func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error {
+ return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...)
+}
diff --git a/vendor/cloud.google.com/go/longrunning/tidyfix.go b/vendor/cloud.google.com/go/longrunning/tidyfix.go
new file mode 100644
index 000000000..d9a07f99e
--- /dev/null
+++ b/vendor/cloud.google.com/go/longrunning/tidyfix.go
@@ -0,0 +1,23 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file, and the {{.RootMod}} import, won't actually become part of
+// the resultant binary.
+//go:build modhack
+// +build modhack
+
+package longrunning
+
+// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
+import _ "cloud.google.com/go"
diff --git a/vendor/cloud.google.com/go/migration.md b/vendor/cloud.google.com/go/migration.md
new file mode 100644
index 000000000..224dcfa13
--- /dev/null
+++ b/vendor/cloud.google.com/go/migration.md
@@ -0,0 +1,50 @@
+# go-genproto to google-cloud-go message type migration
+
+The message types for all of our client libraries are being migrated from the
+`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto)
+to their respective product specific module in this repository. For example
+this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest)
+can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest).
+
+Although the type definitions have moved, aliases have been left in the old
+genproto packages to ensure a smooth non-breaking transition.
+
+## How do I migrate to the new packages?
+
+The easiest option is to run a migration tool at the root of our project. It is
+like `go fix`, but specifically for this migration. Before running the tool it
+is best to make sure any modules that have the prefix of `cloud.google.com/go`
+are up to date. To run the tool, do the following:
+
+```bash
+go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest .
+go mod tidy
+```
+
+The tool should only change up to one line in the import statement per file.
+This can also be done by hand if you prefer.
+
+## Do I have to migrate?
+
+Yes if you wish to keep using the newest versions of our client libraries with
+the newest features -- You should migrate by the start of 2023. Until then we
+will keep updating the aliases in go-genproto weekly. If you have an existing
+workload that uses these client libraries and does not need to update its
+dependencies there is no action to take. All existing written code will continue
+to work.
+
+## Why are these types being moved
+
+1. This change will help simplify dependency trees over time.
+2. The types will now be in product specific modules that are versioned
+ independently with semver. This is especially a benefit for users that rely
+ on multiple clients in a single application. Because message types are no
+ longer mono-packaged users are less likely to run into intermediate
+ dependency conflicts when updating dependencies.
+3. Having all these types in one repository will help us ensure that unintended
+ changes are caught before they would be released.
+
+## Have questions?
+
+Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help)
+if you have any questions or concerns.
diff --git a/vendor/cloud.google.com/go/monitoring/LICENSE b/vendor/cloud.google.com/go/monitoring/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
new file mode 100644
index 000000000..9a9408f19
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
@@ -0,0 +1,403 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newAlertPolicyClientHook clientHook
+
+// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
+type AlertPolicyCallOptions struct {
+ ListAlertPolicies []gax.CallOption
+ GetAlertPolicy []gax.CallOption
+ CreateAlertPolicy []gax.CallOption
+ DeleteAlertPolicy []gax.CallOption
+ UpdateAlertPolicy []gax.CallOption
+}
+
+func defaultAlertPolicyGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
+ return &AlertPolicyCallOptions{
+ ListAlertPolicies: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ }
+}
+
+// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalAlertPolicyClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator
+ GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+ CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+ DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error
+ UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+}
+
+// AlertPolicyClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Cloud Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be “unhealthy” and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the “Monitoring” tab in
+// Cloud console (at https://console.cloud.google.com/).
+type AlertPolicyClient struct {
+ // The internal transport-dependent client.
+ internalClient internalAlertPolicyClient
+
+ // The call options for this service.
+ CallOptions *AlertPolicyCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AlertPolicyClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListAlertPolicies lists the existing alerting policies for the workspace.
+func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ return c.internalClient.ListAlertPolicies(ctx, req, opts...)
+}
+
+// GetAlertPolicy gets a single alerting policy.
+func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.GetAlertPolicy(ctx, req, opts...)
+}
+
+// CreateAlertPolicy creates a new alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.CreateAlertPolicy(ctx, req, opts...)
+}
+
+// DeleteAlertPolicy deletes an alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteAlertPolicy(ctx, req, opts...)
+}
+
+// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
+// a new one or replace only certain fields in the current alerting policy by
+// specifying the fields to be updated via updateMask. Returns the
+// updated alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.UpdateAlertPolicy(ctx, req, opts...)
+}
+
+// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type alertPolicyGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing AlertPolicyClient
+ CallOptions **AlertPolicyCallOptions
+
+ // The gRPC API client.
+ alertPolicyClient monitoringpb.AlertPolicyServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewAlertPolicyClient creates a new alert policy service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Cloud Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be “unhealthy” and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the “Monitoring” tab in
+// Cloud console (at https://console.cloud.google.com/).
+func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
+ clientOpts := defaultAlertPolicyGRPCClientOptions()
+ if newAlertPolicyClientHook != nil {
+ hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()}
+
+ c := &alertPolicyGRPCClient{
+ connPool: connPool,
+ alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *alertPolicyGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...)
+ it := &AlertPolicyIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
+ resp := &monitoringpb.ListAlertPoliciesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.ListAlertPolicies, req, settings.GRPC, c.logger, "ListAlertPolicies")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.GetAlertPolicy, req, settings.GRPC, c.logger, "GetAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.CreateAlertPolicy, req, settings.GRPC, c.logger, "CreateAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.alertPolicyClient.DeleteAlertPolicy, req, settings.GRPC, c.logger, "DeleteAlertPolicy")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.UpdateAlertPolicy, req, settings.GRPC, c.logger, "UpdateAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
new file mode 100644
index 000000000..8dc963458
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
@@ -0,0 +1,682 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "google.golang.org/api/iterator"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
+type AlertPolicyIterator struct {
+ items []*monitoringpb.AlertPolicy
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
+ var item *monitoringpb.AlertPolicy
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *AlertPolicyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *AlertPolicyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// GroupIterator manages a stream of *monitoringpb.Group.
+type GroupIterator struct {
+ items []*monitoringpb.Group
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *GroupIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *GroupIterator) Next() (*monitoringpb.Group, error) {
+ var item *monitoringpb.Group
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *GroupIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *GroupIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor.
+type MetricDescriptorIterator struct {
+ items []*metricpb.MetricDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) {
+ var item *metricpb.MetricDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MetricDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MetricDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+ items []*monitoredrespb.MonitoredResourceDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ var item *monitoredrespb.MonitoredResourceDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource.
+type MonitoredResourceIterator struct {
+ items []*monitoredrespb.MonitoredResource
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) {
+ var item *monitoredrespb.MonitoredResource
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
+type NotificationChannelDescriptorIterator struct {
+ items []*monitoringpb.NotificationChannelDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
+ var item *monitoringpb.NotificationChannelDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
+type NotificationChannelIterator struct {
+ items []*monitoringpb.NotificationChannel
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
+ var item *monitoringpb.NotificationChannel
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ServiceIterator manages a stream of *monitoringpb.Service.
+type ServiceIterator struct {
+ items []*monitoringpb.Service
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *ServiceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ServiceIterator) Next() (*monitoringpb.Service, error) {
+ var item *monitoringpb.Service
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ServiceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ServiceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective.
+type ServiceLevelObjectiveIterator struct {
+ items []*monitoringpb.ServiceLevelObjective
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) {
+ var item *monitoringpb.ServiceLevelObjective
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ServiceLevelObjectiveIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// SnoozeIterator manages a stream of *monitoringpb.Snooze.
+type SnoozeIterator struct {
+ items []*monitoringpb.Snooze
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *SnoozeIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) {
+ var item *monitoringpb.Snooze
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *SnoozeIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *SnoozeIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData.
+type TimeSeriesDataIterator struct {
+ items []*monitoringpb.TimeSeriesData
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) {
+ var item *monitoringpb.TimeSeriesData
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesDataIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesDataIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries.
+type TimeSeriesIterator struct {
+ items []*monitoringpb.TimeSeries
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) {
+ var item *monitoringpb.TimeSeries
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig.
+type UptimeCheckConfigIterator struct {
+ items []*monitoringpb.UptimeCheckConfig
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) {
+ var item *monitoringpb.UptimeCheckConfig
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckConfigIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckConfigIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp.
+type UptimeCheckIpIterator struct {
+ items []*monitoringpb.UptimeCheckIp
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
+ var item *monitoringpb.UptimeCheckIp
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckIpIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckIpIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
new file mode 100644
index 000000000..bf559553b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
@@ -0,0 +1,112 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package monitoring
+
+import (
+ "iter"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2/iterator"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
new file mode 100644
index 000000000..1d5136eda
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
@@ -0,0 +1,85 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+// Package monitoring is an auto-generated package for the
+// Cloud Monitoring API.
+//
+// Manages your Cloud Monitoring data and configurations.
+//
+// NOTE: This package is in beta. It is not stable, and may be subject to changes.
+//
+// # General documentation
+//
+// For information that is relevant for all client libraries please reference
+// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
+// page includes:
+//
+// - [Authentication and Authorization]
+// - [Timeouts and Cancellation]
+// - [Testing against Client Libraries]
+// - [Debugging Client Libraries]
+// - [Inspecting errors]
+//
+// # Example usage
+//
+// To get started with this package, create a client.
+//
+// // go get cloud.google.com/go/monitoring/apiv3/v2@latest
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := monitoring.NewAlertPolicyClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// The client will use your default application credentials. Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+// The returned client must be Closed when it is done being used.
+//
+// # Using the Client
+//
+// The following is an example of making an API call with the newly created client, mentioned above.
+//
+// req := &monitoringpb.CreateAlertPolicyRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest.
+// }
+// resp, err := c.CreateAlertPolicy(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// // TODO: Use resp.
+// _ = resp
+//
+// # Use of Context
+//
+// The ctx passed to NewAlertPolicyClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
+// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
+// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
+// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
+// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
+package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2"
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
new file mode 100644
index 000000000..a33cb6fcf
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
@@ -0,0 +1,336 @@
+{
+ "schema": "1.0",
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
+ "language": "go",
+ "protoPackage": "google.monitoring.v3",
+ "libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2",
+ "services": {
+ "AlertPolicyService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "AlertPolicyClient",
+ "rpcs": {
+ "CreateAlertPolicy": {
+ "methods": [
+ "CreateAlertPolicy"
+ ]
+ },
+ "DeleteAlertPolicy": {
+ "methods": [
+ "DeleteAlertPolicy"
+ ]
+ },
+ "GetAlertPolicy": {
+ "methods": [
+ "GetAlertPolicy"
+ ]
+ },
+ "ListAlertPolicies": {
+ "methods": [
+ "ListAlertPolicies"
+ ]
+ },
+ "UpdateAlertPolicy": {
+ "methods": [
+ "UpdateAlertPolicy"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "GroupService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "GroupClient",
+ "rpcs": {
+ "CreateGroup": {
+ "methods": [
+ "CreateGroup"
+ ]
+ },
+ "DeleteGroup": {
+ "methods": [
+ "DeleteGroup"
+ ]
+ },
+ "GetGroup": {
+ "methods": [
+ "GetGroup"
+ ]
+ },
+ "ListGroupMembers": {
+ "methods": [
+ "ListGroupMembers"
+ ]
+ },
+ "ListGroups": {
+ "methods": [
+ "ListGroups"
+ ]
+ },
+ "UpdateGroup": {
+ "methods": [
+ "UpdateGroup"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "MetricService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "MetricClient",
+ "rpcs": {
+ "CreateMetricDescriptor": {
+ "methods": [
+ "CreateMetricDescriptor"
+ ]
+ },
+ "CreateServiceTimeSeries": {
+ "methods": [
+ "CreateServiceTimeSeries"
+ ]
+ },
+ "CreateTimeSeries": {
+ "methods": [
+ "CreateTimeSeries"
+ ]
+ },
+ "DeleteMetricDescriptor": {
+ "methods": [
+ "DeleteMetricDescriptor"
+ ]
+ },
+ "GetMetricDescriptor": {
+ "methods": [
+ "GetMetricDescriptor"
+ ]
+ },
+ "GetMonitoredResourceDescriptor": {
+ "methods": [
+ "GetMonitoredResourceDescriptor"
+ ]
+ },
+ "ListMetricDescriptors": {
+ "methods": [
+ "ListMetricDescriptors"
+ ]
+ },
+ "ListMonitoredResourceDescriptors": {
+ "methods": [
+ "ListMonitoredResourceDescriptors"
+ ]
+ },
+ "ListTimeSeries": {
+ "methods": [
+ "ListTimeSeries"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "NotificationChannelService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "NotificationChannelClient",
+ "rpcs": {
+ "CreateNotificationChannel": {
+ "methods": [
+ "CreateNotificationChannel"
+ ]
+ },
+ "DeleteNotificationChannel": {
+ "methods": [
+ "DeleteNotificationChannel"
+ ]
+ },
+ "GetNotificationChannel": {
+ "methods": [
+ "GetNotificationChannel"
+ ]
+ },
+ "GetNotificationChannelDescriptor": {
+ "methods": [
+ "GetNotificationChannelDescriptor"
+ ]
+ },
+ "GetNotificationChannelVerificationCode": {
+ "methods": [
+ "GetNotificationChannelVerificationCode"
+ ]
+ },
+ "ListNotificationChannelDescriptors": {
+ "methods": [
+ "ListNotificationChannelDescriptors"
+ ]
+ },
+ "ListNotificationChannels": {
+ "methods": [
+ "ListNotificationChannels"
+ ]
+ },
+ "SendNotificationChannelVerificationCode": {
+ "methods": [
+ "SendNotificationChannelVerificationCode"
+ ]
+ },
+ "UpdateNotificationChannel": {
+ "methods": [
+ "UpdateNotificationChannel"
+ ]
+ },
+ "VerifyNotificationChannel": {
+ "methods": [
+ "VerifyNotificationChannel"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "QueryService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "QueryClient",
+ "rpcs": {
+ "QueryTimeSeries": {
+ "methods": [
+ "QueryTimeSeries"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "ServiceMonitoringService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "ServiceMonitoringClient",
+ "rpcs": {
+ "CreateService": {
+ "methods": [
+ "CreateService"
+ ]
+ },
+ "CreateServiceLevelObjective": {
+ "methods": [
+ "CreateServiceLevelObjective"
+ ]
+ },
+ "DeleteService": {
+ "methods": [
+ "DeleteService"
+ ]
+ },
+ "DeleteServiceLevelObjective": {
+ "methods": [
+ "DeleteServiceLevelObjective"
+ ]
+ },
+ "GetService": {
+ "methods": [
+ "GetService"
+ ]
+ },
+ "GetServiceLevelObjective": {
+ "methods": [
+ "GetServiceLevelObjective"
+ ]
+ },
+ "ListServiceLevelObjectives": {
+ "methods": [
+ "ListServiceLevelObjectives"
+ ]
+ },
+ "ListServices": {
+ "methods": [
+ "ListServices"
+ ]
+ },
+ "UpdateService": {
+ "methods": [
+ "UpdateService"
+ ]
+ },
+ "UpdateServiceLevelObjective": {
+ "methods": [
+ "UpdateServiceLevelObjective"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "SnoozeService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "SnoozeClient",
+ "rpcs": {
+ "CreateSnooze": {
+ "methods": [
+ "CreateSnooze"
+ ]
+ },
+ "GetSnooze": {
+ "methods": [
+ "GetSnooze"
+ ]
+ },
+ "ListSnoozes": {
+ "methods": [
+ "ListSnoozes"
+ ]
+ },
+ "UpdateSnooze": {
+ "methods": [
+ "UpdateSnooze"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "UptimeCheckService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "UptimeCheckClient",
+ "rpcs": {
+ "CreateUptimeCheckConfig": {
+ "methods": [
+ "CreateUptimeCheckConfig"
+ ]
+ },
+ "DeleteUptimeCheckConfig": {
+ "methods": [
+ "DeleteUptimeCheckConfig"
+ ]
+ },
+ "GetUptimeCheckConfig": {
+ "methods": [
+ "GetUptimeCheckConfig"
+ ]
+ },
+ "ListUptimeCheckConfigs": {
+ "methods": [
+ "ListUptimeCheckConfigs"
+ ]
+ },
+ "ListUptimeCheckIps": {
+ "methods": [
+ "ListUptimeCheckIps"
+ ]
+ },
+ "UpdateUptimeCheckConfig": {
+ "methods": [
+ "UpdateUptimeCheckConfig"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
new file mode 100644
index 000000000..a45e1aec2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
@@ -0,0 +1,470 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newGroupClientHook clientHook
+
+// GroupCallOptions contains the retry settings for each method of GroupClient.
+type GroupCallOptions struct {
+ ListGroups []gax.CallOption
+ GetGroup []gax.CallOption
+ CreateGroup []gax.CallOption
+ UpdateGroup []gax.CallOption
+ DeleteGroup []gax.CallOption
+ ListGroupMembers []gax.CallOption
+}
+
+func defaultGroupGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultGroupCallOptions() *GroupCallOptions {
+ return &GroupCallOptions{
+ ListGroups: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateGroup: []gax.CallOption{
+ gax.WithTimeout(180000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ DeleteGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListGroupMembers: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalGroupClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator
+ GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error
+ ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator
+}
+
+// GroupClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+type GroupClient struct {
+ // The internal transport-dependent client.
+ internalClient internalGroupClient
+
+ // The call options for this service.
+ CallOptions *GroupCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *GroupClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *GroupClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListGroups lists the existing groups.
+func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ return c.internalClient.ListGroups(ctx, req, opts...)
+}
+
+// GetGroup gets a single group.
+func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.GetGroup(ctx, req, opts...)
+}
+
+// CreateGroup creates a new group.
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.CreateGroup(ctx, req, opts...)
+}
+
+// UpdateGroup updates an existing group.
+// You can change any group attributes except name.
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.UpdateGroup(ctx, req, opts...)
+}
+
+// DeleteGroup deletes an existing group.
+func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteGroup(ctx, req, opts...)
+}
+
+// ListGroupMembers lists the monitored resources that are members of a group.
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ return c.internalClient.ListGroupMembers(ctx, req, opts...)
+}
+
+// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type groupGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing GroupClient
+ CallOptions **GroupCallOptions
+
+ // The gRPC API client.
+ groupClient monitoringpb.GroupServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewGroupClient creates a new group service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
+ clientOpts := defaultGroupGRPCClientOptions()
+ if newGroupClientHook != nil {
+ hookOpts, err := newGroupClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := GroupClient{CallOptions: defaultGroupCallOptions()}
+
+ c := &groupGRPCClient{
+ connPool: connPool,
+ groupClient: monitoringpb.NewGroupServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *groupGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *groupGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...)
+ it := &GroupIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
+ resp := &monitoringpb.ListGroupsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.ListGroups, req, settings.GRPC, c.logger, "ListGroups")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetGroup(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.GetGroup, req, settings.GRPC, c.logger, "GetGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.CreateGroup, req, settings.GRPC, c.logger, "CreateGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.UpdateGroup, req, settings.GRPC, c.logger, "UpdateGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.groupClient.DeleteGroup, req, settings.GRPC, c.logger, "DeleteGroup")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...)
+ it := &MonitoredResourceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {
+ resp := &monitoringpb.ListGroupMembersResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.ListGroupMembers, req, settings.GRPC, c.logger, "ListGroupMembers")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetMembers(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
new file mode 100644
index 000000000..6719cac86
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
@@ -0,0 +1,64 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "monitoring.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/monitoring",
+ "https://www.googleapis.com/auth/monitoring.read",
+ "https://www.googleapis.com/auth/monitoring.write",
+ }
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
new file mode 100644
index 000000000..29eb4849d
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
@@ -0,0 +1,582 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newMetricClientHook clientHook
+
+// MetricCallOptions contains the retry settings for each method of MetricClient.
+type MetricCallOptions struct {
+ ListMonitoredResourceDescriptors []gax.CallOption
+ GetMonitoredResourceDescriptor []gax.CallOption
+ ListMetricDescriptors []gax.CallOption
+ GetMetricDescriptor []gax.CallOption
+ CreateMetricDescriptor []gax.CallOption
+ DeleteMetricDescriptor []gax.CallOption
+ ListTimeSeries []gax.CallOption
+ CreateTimeSeries []gax.CallOption
+ CreateServiceTimeSeries []gax.CallOption
+}
+
+func defaultMetricGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultMetricCallOptions() *MetricCallOptions {
+ return &MetricCallOptions{
+ ListMonitoredResourceDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetMonitoredResourceDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListMetricDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(12000 * time.Millisecond),
+ },
+ DeleteMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListTimeSeries: []gax.CallOption{
+ gax.WithTimeout(90000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateTimeSeries: []gax.CallOption{
+ gax.WithTimeout(12000 * time.Millisecond),
+ },
+ CreateServiceTimeSeries: []gax.CallOption{},
+ }
+}
+
+// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalMetricClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator
+ GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error)
+ ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator
+ GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
+ CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
+ DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error
+ ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator
+ CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
+ CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
+}
+
+// MetricClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+type MetricClient struct {
+ // The internal transport-dependent client.
+ internalClient internalMetricClient
+
+ // The call options for this service.
+ CallOptions *MetricCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *MetricClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter.
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...)
+}
+
+// GetMonitoredResourceDescriptor gets a single monitored resource descriptor.
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...)
+}
+
+// ListMetricDescriptors lists metric descriptors that match a filter.
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ return c.internalClient.ListMetricDescriptors(ctx, req, opts...)
+}
+
+// GetMetricDescriptor gets a single metric descriptor.
+func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ return c.internalClient.GetMetricDescriptor(ctx, req, opts...)
+}
+
+// CreateMetricDescriptor creates a new metric descriptor.
+// The creation is executed asynchronously.
+// User-created metric descriptors define
+// custom metrics (at https://cloud.google.com/monitoring/custom-metrics).
+// The metric descriptor is updated if it already exists,
+// except that metric labels are never removed.
+func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ return c.internalClient.CreateMetricDescriptor(ctx, req, opts...)
+}
+
+// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
+// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be
+// deleted.
+func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...)
+}
+
+// ListTimeSeries lists time series that match a filter.
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ return c.internalClient.ListTimeSeries(ctx, req, opts...)
+}
+
+// CreateTimeSeries creates or adds data to one or more time series.
+// The response is empty if all time series in the request were written.
+// If any time series could not be written, a corresponding failure message is
+// included in the error response.
+// This method does not support
+// resource locations constraint of an organization
+// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CreateTimeSeries(ctx, req, opts...)
+}
+
+// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time
+// series is a time series for a metric from a Google Cloud service. The
+// response is empty if all time series in the request were written. If any
+// time series could not be written, a corresponding failure message is
+// included in the error response. This endpoint rejects writes to
+// user-defined metrics.
+// This method is only for use by Google Cloud services. Use
+// projects.timeSeries.create
+// instead.
+func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...)
+}
+
+// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type metricGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing MetricClient
+ CallOptions **MetricCallOptions
+
+ // The gRPC API client.
+ metricClient monitoringpb.MetricServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewMetricClient creates a new metric service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
+ clientOpts := defaultMetricGRPCClientOptions()
+ if newMetricClientHook != nil {
+ hookOpts, err := newMetricClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := MetricClient{CallOptions: defaultMetricCallOptions()}
+
+ c := &metricGRPCClient{
+ connPool: connPool,
+ metricClient: monitoringpb.NewMetricServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *metricGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *metricGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...)
+ it := &MonitoredResourceDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListMonitoredResourceDescriptors, req, settings.GRPC, c.logger, "ListMonitoredResourceDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...)
+ var resp *monitoredrespb.MonitoredResourceDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.GetMonitoredResourceDescriptor, req, settings.GRPC, c.logger, "GetMonitoredResourceDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...)
+ it := &MetricDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
+ resp := &monitoringpb.ListMetricDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListMetricDescriptors, req, settings.GRPC, c.logger, "ListMetricDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.GetMetricDescriptor, req, settings.GRPC, c.logger, "GetMetricDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.CreateMetricDescriptor, req, settings.GRPC, c.logger, "CreateMetricDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.DeleteMetricDescriptor, req, settings.GRPC, c.logger, "DeleteMetricDescriptor")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...)
+ it := &TimeSeriesIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
+ resp := &monitoringpb.ListTimeSeriesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListTimeSeries, req, settings.GRPC, c.logger, "ListTimeSeries")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetTimeSeries(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.CreateTimeSeries, req, settings.GRPC, c.logger, "CreateTimeSeries")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.CreateServiceTimeSeries, req, settings.GRPC, c.logger, "CreateServiceTimeSeries")
+ return err
+ }, opts...)
+ return err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
new file mode 100644
index 000000000..24ca1414b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
@@ -0,0 +1,2894 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/alert.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ timeofday "google.golang.org/genproto/googleapis/type/timeofday"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Operators for combining conditions.
+type AlertPolicy_ConditionCombinerType int32
+
+const (
+ // An unspecified combiner.
+ AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0
+ // Combine conditions using the logical `AND` operator. An
+ // incident is created only if all the conditions are met
+ // simultaneously. This combiner is satisfied if all conditions are
+ // met, even if they are met on completely different resources.
+ AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1
+ // Combine conditions using the logical `OR` operator. An incident
+ // is created if any of the listed conditions is met.
+ AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2
+ // Combine conditions using logical `AND` operator, but unlike the regular
+ // `AND` option, an incident is created only if all conditions are met
+ // simultaneously on at least one resource.
+ AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3
+)
+
+// Enum value maps for AlertPolicy_ConditionCombinerType.
+var (
+ AlertPolicy_ConditionCombinerType_name = map[int32]string{
+ 0: "COMBINE_UNSPECIFIED",
+ 1: "AND",
+ 2: "OR",
+ 3: "AND_WITH_MATCHING_RESOURCE",
+ }
+ AlertPolicy_ConditionCombinerType_value = map[string]int32{
+ "COMBINE_UNSPECIFIED": 0,
+ "AND": 1,
+ "OR": 2,
+ "AND_WITH_MATCHING_RESOURCE": 3,
+ }
+)
+
+func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType {
+ p := new(AlertPolicy_ConditionCombinerType)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_ConditionCombinerType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor()
+}
+
+func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[0]
+}
+
+func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead.
+func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// An enumeration of possible severity level for an alerting policy.
+type AlertPolicy_Severity int32
+
+const (
+ // No severity is specified. This is the default value.
+ AlertPolicy_SEVERITY_UNSPECIFIED AlertPolicy_Severity = 0
+ // This is the highest severity level. Use this if the problem could
+ // cause significant damage or downtime.
+ AlertPolicy_CRITICAL AlertPolicy_Severity = 1
+ // This is the medium severity level. Use this if the problem could
+ // cause minor damage or downtime.
+ AlertPolicy_ERROR AlertPolicy_Severity = 2
+ // This is the lowest severity level. Use this if the problem is not causing
+ // any damage or downtime, but could potentially lead to a problem in the
+ // future.
+ AlertPolicy_WARNING AlertPolicy_Severity = 3
+)
+
+// Enum value maps for AlertPolicy_Severity.
+var (
+ AlertPolicy_Severity_name = map[int32]string{
+ 0: "SEVERITY_UNSPECIFIED",
+ 1: "CRITICAL",
+ 2: "ERROR",
+ 3: "WARNING",
+ }
+ AlertPolicy_Severity_value = map[string]int32{
+ "SEVERITY_UNSPECIFIED": 0,
+ "CRITICAL": 1,
+ "ERROR": 2,
+ "WARNING": 3,
+ }
+)
+
+func (x AlertPolicy_Severity) Enum() *AlertPolicy_Severity {
+ p := new(AlertPolicy_Severity)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_Severity) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_Severity) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[1].Descriptor()
+}
+
+func (AlertPolicy_Severity) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[1]
+}
+
+func (x AlertPolicy_Severity) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_Severity.Descriptor instead.
+func (AlertPolicy_Severity) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1}
+}
+
+// A condition control that determines how metric-threshold conditions
+// are evaluated when data stops arriving.
+// This control doesn't affect metric-absence policies.
+type AlertPolicy_Condition_EvaluationMissingData int32
+
+const (
+ // An unspecified evaluation missing data option. Equivalent to
+ // EVALUATION_MISSING_DATA_NO_OP.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED AlertPolicy_Condition_EvaluationMissingData = 0
+ // If there is no data to evaluate the condition, then evaluate the
+ // condition as false.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_INACTIVE AlertPolicy_Condition_EvaluationMissingData = 1
+ // If there is no data to evaluate the condition, then evaluate the
+ // condition as true.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_ACTIVE AlertPolicy_Condition_EvaluationMissingData = 2
+ // Do not evaluate the condition to any value if there is no data.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_NO_OP AlertPolicy_Condition_EvaluationMissingData = 3
+)
+
+// Enum value maps for AlertPolicy_Condition_EvaluationMissingData.
+var (
+ AlertPolicy_Condition_EvaluationMissingData_name = map[int32]string{
+ 0: "EVALUATION_MISSING_DATA_UNSPECIFIED",
+ 1: "EVALUATION_MISSING_DATA_INACTIVE",
+ 2: "EVALUATION_MISSING_DATA_ACTIVE",
+ 3: "EVALUATION_MISSING_DATA_NO_OP",
+ }
+ AlertPolicy_Condition_EvaluationMissingData_value = map[string]int32{
+ "EVALUATION_MISSING_DATA_UNSPECIFIED": 0,
+ "EVALUATION_MISSING_DATA_INACTIVE": 1,
+ "EVALUATION_MISSING_DATA_ACTIVE": 2,
+ "EVALUATION_MISSING_DATA_NO_OP": 3,
+ }
+)
+
+func (x AlertPolicy_Condition_EvaluationMissingData) Enum() *AlertPolicy_Condition_EvaluationMissingData {
+ p := new(AlertPolicy_Condition_EvaluationMissingData)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_Condition_EvaluationMissingData) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_Condition_EvaluationMissingData) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[2].Descriptor()
+}
+
+func (AlertPolicy_Condition_EvaluationMissingData) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[2]
+}
+
+func (x AlertPolicy_Condition_EvaluationMissingData) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_EvaluationMissingData.Descriptor instead.
+func (AlertPolicy_Condition_EvaluationMissingData) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+// Control when notifications will be sent out.
+type AlertPolicy_AlertStrategy_NotificationPrompt int32
+
+const (
+ // No strategy specified. Treated as error.
+ AlertPolicy_AlertStrategy_NOTIFICATION_PROMPT_UNSPECIFIED AlertPolicy_AlertStrategy_NotificationPrompt = 0
+ // Notify when an incident is opened.
+ AlertPolicy_AlertStrategy_OPENED AlertPolicy_AlertStrategy_NotificationPrompt = 1
+ // Notify when an incident is closed.
+ AlertPolicy_AlertStrategy_CLOSED AlertPolicy_AlertStrategy_NotificationPrompt = 3
+)
+
+// Enum value maps for AlertPolicy_AlertStrategy_NotificationPrompt.
+var (
+ AlertPolicy_AlertStrategy_NotificationPrompt_name = map[int32]string{
+ 0: "NOTIFICATION_PROMPT_UNSPECIFIED",
+ 1: "OPENED",
+ 3: "CLOSED",
+ }
+ AlertPolicy_AlertStrategy_NotificationPrompt_value = map[string]int32{
+ "NOTIFICATION_PROMPT_UNSPECIFIED": 0,
+ "OPENED": 1,
+ "CLOSED": 3,
+ }
+)
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) Enum() *AlertPolicy_AlertStrategy_NotificationPrompt {
+ p := new(AlertPolicy_AlertStrategy_NotificationPrompt)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_AlertStrategy_NotificationPrompt) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[3].Descriptor()
+}
+
+func (AlertPolicy_AlertStrategy_NotificationPrompt) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[3]
+}
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationPrompt.Descriptor instead.
+func (AlertPolicy_AlertStrategy_NotificationPrompt) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+// A description of the conditions under which some aspect of your system is
+// considered to be "unhealthy" and the ways to notify people or services about
+// this state. For an overview of alerting policies, see
+// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/).
+type AlertPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Required if the policy exists. The resource name for this
+ // policy. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy
+ // is created. When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the alerting policy passed as
+ // part of the request.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the policy in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple policies in the same project. The name is
+ // limited to 512 Unicode characters.
+ //
+ // The convention for the display_name of a PrometheusQueryLanguageCondition
+ // is "{rule group name}/{alert name}", where the {rule group name} and
+ // {alert name} should be taken from the corresponding Prometheus
+ // configuration file. This convention is not enforced.
+ // In any case the display_name is not a unique key of the AlertPolicy.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Documentation that is included with notifications and incidents related to
+ // this policy. Best practice is for the documentation to include information
+ // to help responders understand, mitigate, escalate, and correct the
+ // underlying problems detected by the alerting policy. Notification channels
+ // that have limited capacity might not show this documentation.
+ Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `AlertPolicy` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ //
+ // Note that Prometheus {alert name} is a
+ // [valid Prometheus label
+ // names](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels),
+ // whereas Prometheus {rule group} is an unrestricted UTF-8 string.
+ // This means that they cannot be stored as-is in user labels, because
+ // they may contain characters that are not allowed in user-label values.
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of conditions for the policy. The conditions are combined by AND or
+ // OR according to the `combiner` field. If the combined conditions evaluate
+ // to true, then an incident is created. A policy can have from one to six
+ // conditions.
+ // If `condition_time_series_query_language` is present, it must be the only
+ // `condition`.
+ // If `condition_monitoring_query_language` is present, it must be the only
+ // `condition`.
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"`
+ // How to combine the results of multiple conditions to determine if an
+ // incident should be opened.
+ // If `condition_time_series_query_language` is present, this must be
+ // `COMBINE_UNSPECIFIED`.
+ Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
+ // Whether or not the policy is enabled. On write, the default interpretation
+ // if unset is that the policy is enabled. On read, clients should not make
+ // any assumption about the state if it has not been populated. The
+ // field should always be populated on List and Get operations, unless
+ // a field projection has been specified that strips it out.
+ Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Read-only description of how the alerting policy is invalid. This field is
+ // only set when the alerting policy is invalid. An invalid alerting policy
+ // will not generate incidents.
+ Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"`
+ // Identifies the notification channels to which notifications should be sent
+ // when incidents are opened or closed or when new violations occur on
+ // an already opened incident. Each element of this array corresponds to
+ // the `name` field in each of the
+ // [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
+ // objects that are returned from the [`ListNotificationChannels`]
+ // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // method. The format of the entries in this field is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // A read-only record of the creation of the alerting policy. If provided
+ // in a call to create or update, this field will be ignored.
+ CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
+ // A read-only record of the most recent change to the alerting policy. If
+ // provided in a call to create or update, this field will be ignored.
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"`
+ // Control over how this alerting policy's notification channels are notified.
+ AlertStrategy *AlertPolicy_AlertStrategy `protobuf:"bytes,21,opt,name=alert_strategy,json=alertStrategy,proto3" json:"alert_strategy,omitempty"`
+ // Optional. The severity of an alerting policy indicates how important
+ // incidents generated by that policy are. The severity level will be
+ // displayed on the Incident detail page and in notifications.
+ Severity AlertPolicy_Severity `protobuf:"varint,22,opt,name=severity,proto3,enum=google.monitoring.v3.AlertPolicy_Severity" json:"severity,omitempty"`
+}
+
+func (x *AlertPolicy) Reset() {
+ *x = AlertPolicy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy) ProtoMessage() {}
+
+func (x *AlertPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AlertPolicy) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+ if x != nil {
+ return x.Documentation
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+ if x != nil {
+ return x.Conditions
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+ if x != nil {
+ return x.Combiner
+ }
+ return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Enabled
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetValidity() *status.Status {
+ if x != nil {
+ return x.Validity
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetNotificationChannels() []string {
+ if x != nil {
+ return x.NotificationChannels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCreationRecord() *MutationRecord {
+ if x != nil {
+ return x.CreationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetMutationRecord() *MutationRecord {
+ if x != nil {
+ return x.MutationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetAlertStrategy() *AlertPolicy_AlertStrategy {
+ if x != nil {
+ return x.AlertStrategy
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity {
+ if x != nil {
+ return x.Severity
+ }
+ return AlertPolicy_SEVERITY_UNSPECIFIED
+}
+
+// Documentation that is included in the notifications and incidents
+// pertaining to this policy.
+type AlertPolicy_Documentation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The body of the documentation, interpreted according to `mime_type`.
+ // The content may not exceed 8,192 Unicode characters and may not exceed
+ // more than 10,240 bytes when encoded in UTF-8 format, whichever is
+ // smaller. This text can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The format of the `content` field. Presently, only the value
+ // `"text/markdown"` is supported. See
+ // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"`
+ // Optional. The subject line of the notification. The subject line may not
+ // exceed 10,240 bytes. In notifications generated by this policy, the
+ // contents of the subject line after variable expansion will be truncated
+ // to 255 bytes or shorter at the latest UTF-8 character boundary. The
+ // 255-byte limit is recommended by [this
+ // thread](https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit).
+ // It is both the limit imposed by some third-party ticketing products and
+ // it is common to define textual fields in databases as VARCHAR(255).
+ //
+ // The contents of the subject line can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ // If this field is missing or empty, a default subject line will be
+ // generated.
+ Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"`
+ // Optional. Links to content such as playbooks, repositories, and other
+ // resources. This field can contain up to 3 entries.
+ Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *AlertPolicy_Documentation) Reset() {
+ *x = AlertPolicy_Documentation{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Documentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Documentation) ProtoMessage() {}
+
+func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *AlertPolicy_Documentation) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetMimeType() string {
+ if x != nil {
+ return x.MimeType
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+// A condition is a true/false test that determines when an alerting policy
+// should open an incident. If a condition evaluates to true, it signifies
+// that something is wrong.
+type AlertPolicy_Condition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required if the condition exists. The unique resource name for this
+ // condition. Its format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
+ //
+ // `[CONDITION_ID]` is assigned by Cloud Monitoring when the
+ // condition is created as part of a new or updated alerting policy.
+ //
+ // When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the conditions of the
+ // requested alerting policy. Cloud Monitoring creates the
+ // condition identifiers and includes them in the new policy.
+ //
+ // When calling the
+ // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy]
+ // method to update a policy, including a condition `name` causes the
+ // existing condition to be updated. Conditions without names are added to
+ // the updated policy. Existing conditions are deleted if they are not
+ // updated.
+ //
+ // Best practice is to preserve `[CONDITION_ID]` if you make only small
+ // changes, such as those to condition thresholds, durations, or trigger
+ // values. Otherwise, treat the change as a new condition and let the
+ // existing condition be deleted.
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the condition in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple conditions in the same policy.
+ DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Only one of the following condition types will be specified.
+ //
+ // Types that are assignable to Condition:
+ //
+ // *AlertPolicy_Condition_ConditionThreshold
+ // *AlertPolicy_Condition_ConditionAbsent
+ // *AlertPolicy_Condition_ConditionMatchedLog
+ // *AlertPolicy_Condition_ConditionMonitoringQueryLanguage
+ // *AlertPolicy_Condition_ConditionPrometheusQueryLanguage
+ // *AlertPolicy_Condition_ConditionSql
+ Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"`
+}
+
+func (x *AlertPolicy_Condition) Reset() {
+ *x = AlertPolicy_Condition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *AlertPolicy_Condition) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition {
+ if m != nil {
+ return m.Condition
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok {
+ return x.ConditionThreshold
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok {
+ return x.ConditionAbsent
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionMatchedLog() *AlertPolicy_Condition_LogMatch {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMatchedLog); ok {
+ return x.ConditionMatchedLog
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionMonitoringQueryLanguage() *AlertPolicy_Condition_MonitoringQueryLanguageCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMonitoringQueryLanguage); ok {
+ return x.ConditionMonitoringQueryLanguage
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionPrometheusQueryLanguage() *AlertPolicy_Condition_PrometheusQueryLanguageCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionPrometheusQueryLanguage); ok {
+ return x.ConditionPrometheusQueryLanguage
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionSql() *AlertPolicy_Condition_SqlCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionSql); ok {
+ return x.ConditionSql
+ }
+ return nil
+}
+
+type isAlertPolicy_Condition_Condition interface {
+ isAlertPolicy_Condition_Condition()
+}
+
+type AlertPolicy_Condition_ConditionThreshold struct {
+ // A condition that compares a time series against a threshold.
+ ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionAbsent struct {
+ // A condition that checks that a time series continues to
+ // receive new data points.
+ ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMatchedLog struct {
+ // A condition that checks for log messages matching given constraints. If
+ // set, no other conditions can be present.
+ ConditionMatchedLog *AlertPolicy_Condition_LogMatch `protobuf:"bytes,20,opt,name=condition_matched_log,json=conditionMatchedLog,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMonitoringQueryLanguage struct {
+ // A condition that uses the Monitoring Query Language to define
+ // alerts.
+ ConditionMonitoringQueryLanguage *AlertPolicy_Condition_MonitoringQueryLanguageCondition `protobuf:"bytes,19,opt,name=condition_monitoring_query_language,json=conditionMonitoringQueryLanguage,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionPrometheusQueryLanguage struct {
+ // A condition that uses the Prometheus query language to define alerts.
+ ConditionPrometheusQueryLanguage *AlertPolicy_Condition_PrometheusQueryLanguageCondition `protobuf:"bytes,21,opt,name=condition_prometheus_query_language,json=conditionPrometheusQueryLanguage,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionSql struct {
+ // A condition that periodically evaluates a SQL query result.
+ ConditionSql *AlertPolicy_Condition_SqlCondition `protobuf:"bytes,22,opt,name=condition_sql,json=conditionSql,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMatchedLog) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionSql) isAlertPolicy_Condition_Condition() {}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires.
+type AlertPolicy_AlertStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required for log-based alerting policies, i.e. policies with a `LogMatch`
+ // condition.
+ //
+ // This limit is not implemented for alerting policies that do not have
+ // a LogMatch condition.
+ NotificationRateLimit *AlertPolicy_AlertStrategy_NotificationRateLimit `protobuf:"bytes,1,opt,name=notification_rate_limit,json=notificationRateLimit,proto3" json:"notification_rate_limit,omitempty"`
+ // For log-based alert policies, the notification prompts is always
+ // [OPENED]. For non log-based alert policies, the notification prompts can
+ // be [OPENED] or [OPENED, CLOSED].
+ NotificationPrompts []AlertPolicy_AlertStrategy_NotificationPrompt `protobuf:"varint,2,rep,packed,name=notification_prompts,json=notificationPrompts,proto3,enum=google.monitoring.v3.AlertPolicy_AlertStrategy_NotificationPrompt" json:"notification_prompts,omitempty"`
+ // If an alerting policy that was active has no data for this long, any open
+ // incidents will close
+ AutoClose *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"`
+ // Control how notifications will be sent out, on a per-channel basis.
+ NotificationChannelStrategy []*AlertPolicy_AlertStrategy_NotificationChannelStrategy `protobuf:"bytes,4,rep,name=notification_channel_strategy,json=notificationChannelStrategy,proto3" json:"notification_channel_strategy,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy) Reset() {
+ *x = AlertPolicy_AlertStrategy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationRateLimit() *AlertPolicy_AlertStrategy_NotificationRateLimit {
+ if x != nil {
+ return x.NotificationRateLimit
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationPrompts() []AlertPolicy_AlertStrategy_NotificationPrompt {
+ if x != nil {
+ return x.NotificationPrompts
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetAutoClose() *durationpb.Duration {
+ if x != nil {
+ return x.AutoClose
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPolicy_AlertStrategy_NotificationChannelStrategy {
+ if x != nil {
+ return x.NotificationChannelStrategy
+ }
+ return nil
+}
+
+// Links to content such as playbooks, repositories, and other resources.
+type AlertPolicy_Documentation_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A short display name for the link. The display name must not be empty
+ // or exceed 63 characters. Example: "playbook".
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The url of a webpage.
+ // A url can be templatized by using variables
+ // in the path or the query parameters. The total length of a URL should
+ // not exceed 2083 characters before and after variable expansion.
+ // Example: "https://my_domain.com/playbook?name=${resource.name}"
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *AlertPolicy_Documentation_Link) Reset() {
+ *x = AlertPolicy_Documentation_Link{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Documentation_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Documentation_Link) ProtoMessage() {}
+
+func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *AlertPolicy_Documentation_Link) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation_Link) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+// Specifies how many time series must fail a predicate to trigger a
+// condition. If not specified, then a `{count: 1}` trigger is used.
+type AlertPolicy_Condition_Trigger struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A type of trigger.
+ //
+ // Types that are assignable to Type:
+ //
+ // *AlertPolicy_Condition_Trigger_Count
+ // *AlertPolicy_Condition_Trigger_Percent
+ Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"`
+}
+
+func (x *AlertPolicy_Condition_Trigger) Reset() {
+ *x = AlertPolicy_Condition_Trigger{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_Trigger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_Trigger) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_Trigger) GetCount() int32 {
+ if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 {
+ if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok {
+ return x.Percent
+ }
+ return 0
+}
+
+type isAlertPolicy_Condition_Trigger_Type interface {
+ isAlertPolicy_Condition_Trigger_Type()
+}
+
+type AlertPolicy_Condition_Trigger_Count struct {
+ // The absolute number of time series that must fail
+ // the predicate for the condition to be triggered.
+ Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_Trigger_Percent struct {
+ // The percentage of time series that must fail the
+ // predicate for the condition to be triggered.
+ Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {}
+
+func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A
+ // [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list)
+ // (that call is useful to verify the time series that will be retrieved /
+ // processed). The filter must specify the metric type and the resource
+ // type. Optionally, it can specify resource labels and metric labels.
+ // This field must not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies a time series that should be used as the denominator of a
+ // ratio that will be compared with the threshold. If a
+ // `denominator_filter` is specified, the time series specified by the
+ // `filter` field will be used as the numerator.
+ //
+ // The filter must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"`
+ // Specifies the alignment of data points in individual time series
+ // selected by `denominatorFilter` as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources).
+ //
+ // When computing ratios, the `aggregations` and
+ // `denominator_aggregations` fields must use the same alignment period
+ // and produce time series that have the same periodicity and labels.
+ DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"`
+ // When this field is present, the `MetricThreshold` condition forecasts
+ // whether the time series is predicted to violate the threshold within
+ // the `forecast_horizon`. When this field is not set, the
+ // `MetricThreshold` tests the current value of the timeseries against the
+ // threshold.
+ ForecastOptions *AlertPolicy_Condition_MetricThreshold_ForecastOptions `protobuf:"bytes,12,opt,name=forecast_options,json=forecastOptions,proto3" json:"forecast_options,omitempty"`
+ // The comparison to apply between the time series (indicated by `filter`
+ // and `aggregation`) and the threshold (indicated by `threshold_value`).
+ // The comparison is applied on each time series, with the time series
+ // on the left-hand side and the threshold on the right-hand side.
+ //
+ // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently.
+ Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // A value against which to compare the time series.
+ ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g., 0, 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. When choosing a duration, it is useful to
+ // keep in mind the frequency of the underlying time series data
+ // (which may also be affected by any alignments specified in the
+ // `aggregations` field); a good duration is long enough so that a single
+ // outlier does not generate spurious alerts, but short enough that
+ // unhealthy states are detected and alerted on quickly.
+ Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ // A condition control that determines how metric-threshold conditions
+ // are evaluated when data stops arriving. To use this control, the value
+ // of the `duration` field must be greater than or equal to 60 seconds.
+ EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) Reset() {
+ *x = AlertPolicy_Condition_MetricThreshold{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation {
+ if x != nil {
+ return x.Aggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string {
+ if x != nil {
+ return x.DenominatorFilter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation {
+ if x != nil {
+ return x.DenominatorAggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetForecastOptions() *AlertPolicy_Condition_MetricThreshold_ForecastOptions {
+ if x != nil {
+ return x.ForecastOptions
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType {
+ if x != nil {
+ return x.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 {
+ if x != nil {
+ return x.ThresholdValue
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData {
+ if x != nil {
+ return x.EvaluationMissingData
+ }
+ return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED
+}
+
+// A condition type that checks that monitored resources
+// are reporting data. The configuration defines a metric and
+// a set of monitored resources. The predicate is considered in violation
+// when a time series for the specified metric of a monitored
+// resource does not include any data in the specified `duration`.
+type AlertPolicy_Condition_MetricAbsence struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A
+ // [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list)
+ // (that call is useful to verify the time series that will be retrieved /
+ // processed). The filter must specify the metric type and the resource
+ // type. Optionally, it can specify resource labels and metric labels.
+ // This field must not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // The amount of time that a time series must fail to report new
+ // data to be considered failing. The minimum value of this field
+ // is 120 seconds. Larger values that are a multiple of a
+ // minute--for example, 240 or 300 seconds--are supported.
+ // If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) Reset() {
+ *x = AlertPolicy_Condition_MetricAbsence{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2}
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation {
+ if x != nil {
+ return x.Aggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+// A condition type that checks whether a log message in the [scoping
+// project](https://cloud.google.com/monitoring/api/v3#project_name)
+// satisfies the given filter. Logs from other projects in the metrics
+// scope are not evaluated.
+type AlertPolicy_Condition_LogMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A logs-based filter. See [Advanced Logs
+ // Queries](https://cloud.google.com/logging/docs/view/advanced-queries)
+ // for how this filter should be constructed.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A map from a label key to an extractor expression, which is
+ // used to extract the value for this label key. Each entry in this map is
+ // a specification for how data should be extracted from log entries that
+ // match `filter`. Each combination of extracted values is treated as a
+ // separate rule for the purposes of triggering notifications. Label keys
+ // and corresponding values can be used in notifications generated by this
+ // condition.
+ //
+ // Please see [the documentation on logs-based metric
+ // `valueExtractor`s](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor)
+ // for syntax and examples.
+ LabelExtractors map[string]string `protobuf:"bytes,2,rep,name=label_extractors,json=labelExtractors,proto3" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *AlertPolicy_Condition_LogMatch) Reset() {
+ *x = AlertPolicy_Condition_LogMatch{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_LogMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_LogMatch.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_LogMatch) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 3}
+}
+
+func (x *AlertPolicy_Condition_LogMatch) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_LogMatch) GetLabelExtractors() map[string]string {
+ if x != nil {
+ return x.LabelExtractors
+ }
+ return nil
+}
+
+// A condition type that allows alerting policies to be defined using
+// [Monitoring Query Language](https://cloud.google.com/monitoring/mql).
+type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // [Monitoring Query Language](https://cloud.google.com/monitoring/mql)
+ // query that outputs a boolean stream.
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g., 0, 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. When choosing a duration, it is useful to
+ // keep in mind the frequency of the underlying time series data
+ // (which may also be affected by any alignments specified in the
+ // `aggregations` field); a good duration is long enough so that a single
+ // outlier does not generate spurious alerts, but short enough that
+ // unhealthy states are detected and alerted on quickly.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ // A condition control that determines how metric-threshold conditions
+ // are evaluated when data stops arriving.
+ EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,4,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() {
+ *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MonitoringQueryLanguageCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 4}
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData {
+ if x != nil {
+ return x.EvaluationMissingData
+ }
+ return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED
+}
+
+// A condition type that allows alerting policies to be defined using
+// [Prometheus Query Language
+// (PromQL)](https://prometheus.io/docs/prometheus/latest/querying/basics/).
+//
+// The PrometheusQueryLanguageCondition message contains information
+// from a Prometheus alerting rule and its associated rule group.
+//
+// A Prometheus alerting rule is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+// The semantics of a Prometheus alerting rule is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule).
+//
+// A Prometheus rule group is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
+// The semantics of a Prometheus rule group is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group).
+//
+// Because Cloud Alerting has no representation of a Prometheus rule
+// group resource, we must embed the information of the parent rule
+// group inside each of the conditions that refer to it. We must also
+// update the contents of all Prometheus alerts in case the information
+// of their rule group changes.
+//
+// The PrometheusQueryLanguageCondition protocol buffer combines the
+// information of the corresponding rule group and alerting rule.
+// The structure of the PrometheusQueryLanguageCondition protocol buffer
+// does NOT mimic the structure of the Prometheus rule group and alerting
+// rule YAML declarations. The PrometheusQueryLanguageCondition protocol
+// buffer may change in the future to support future rule group and/or
+// alerting rule features. There are no new such features at the present
+// time (2023-06-26).
+type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The PromQL expression to evaluate. Every evaluation cycle
+ // this expression is evaluated at the current time, and all resultant
+ // time series become pending/firing alerts. This field must not be empty.
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // Optional. Alerts are considered firing once their PromQL expression was
+ // evaluated to be "true" for this long.
+ // Alerts whose PromQL expression was not evaluated to be "true" for
+ // long enough are considered pending.
+ // Must be a non-negative duration or missing.
+ // This field is optional. Its default value is zero.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // Optional. How often this rule should be evaluated.
+ // Must be a positive multiple of 30 seconds or missing.
+ // This field is optional. Its default value is 30 seconds.
+ // If this PrometheusQueryLanguageCondition was generated from a
+ // Prometheus alerting rule, then this value should be taken from the
+ // enclosing rule group.
+ EvaluationInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=evaluation_interval,json=evaluationInterval,proto3" json:"evaluation_interval,omitempty"`
+ // Optional. Labels to add to or overwrite in the PromQL query result.
+ // Label names [must be
+ // valid](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+ // Label values can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ // The only available variable names are the names of the labels in the
+ // PromQL result, including "__name__" and "value". "labels" may be empty.
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Optional. The rule group name of this alert in the corresponding
+ // Prometheus configuration file.
+ //
+ // Some external tools may require this field to be populated correctly
+ // in order to refer to the original Prometheus configuration file.
+ // The rule group name and the alert name are necessary to update the
+ // relevant AlertPolicies in case the definition of the rule group changes
+ // in the future.
+ //
+ // This field is optional. If this field is not empty, then it must
+ // contain a valid UTF-8 string.
+ // This field may not exceed 2048 Unicode characters in length.
+ RuleGroup string `protobuf:"bytes,5,opt,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"`
+ // Optional. The alerting rule name of this alert in the corresponding
+ // Prometheus configuration file.
+ //
+ // Some external tools may require this field to be populated correctly
+ // in order to refer to the original Prometheus configuration file.
+ // The rule group name and the alert name are necessary to update the
+ // relevant AlertPolicies in case the definition of the rule group changes
+ // in the future.
+ //
+ // This field is optional. If this field is not empty, then it must be a
+ // [valid Prometheus label
+ // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+ // This field may not exceed 2048 Unicode characters in length.
+ AlertRule string `protobuf:"bytes,6,opt,name=alert_rule,json=alertRule,proto3" json:"alert_rule,omitempty"`
+ // Optional. Whether to disable metric existence validation for this
+ // condition.
+ //
+ // This allows alerting policies to be defined on metrics that do not yet
+ // exist, improving advanced customer workflows such as configuring
+ // alerting policies using Terraform.
+ //
+ // Users with the `monitoring.alertPolicyViewer` role are able to see the
+ // name of the non-existent metric in the alerting policy condition.
+ DisableMetricValidation bool `protobuf:"varint,7,opt,name=disable_metric_validation,json=disableMetricValidation,proto3" json:"disable_metric_validation,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() {
+ *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_PrometheusQueryLanguageCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 5}
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetEvaluationInterval() *durationpb.Duration {
+ if x != nil {
+ return x.EvaluationInterval
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetRuleGroup() string {
+ if x != nil {
+ return x.RuleGroup
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetAlertRule() string {
+ if x != nil {
+ return x.AlertRule
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDisableMetricValidation() bool {
+ if x != nil {
+ return x.DisableMetricValidation
+ }
+ return false
+}
+
+// A condition that allows alerting policies to be defined using GoogleSQL.
+// SQL conditions examine a sliding window of logs using GoogleSQL.
+// Alert policies with SQL conditions may incur additional billing.
+type AlertPolicy_Condition_SqlCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Log Analytics SQL query to run, as a string. The query
+ // must conform to the required shape. Specifically, the query must not
+ // try to filter the input by time. A filter will automatically be
+ // applied to filter the input so that the query receives all rows
+ // received since the last time the query was run.
+ //
+ // For example, the following query extracts all log entries containing an
+ // HTTP request:
+ //
+ // SELECT
+ // timestamp, log_name, severity, http_request, resource, labels
+ // FROM
+ // my-project.global._Default._AllLogs
+ // WHERE
+ // http_request IS NOT NULL
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // The schedule indicates how often the query should be run.
+ //
+ // Types that are assignable to Schedule:
+ //
+ // *AlertPolicy_Condition_SqlCondition_Minutes_
+ // *AlertPolicy_Condition_SqlCondition_Hourly_
+ // *AlertPolicy_Condition_SqlCondition_Daily_
+ Schedule isAlertPolicy_Condition_SqlCondition_Schedule `protobuf_oneof:"schedule"`
+ // The test to be run against the SQL result set.
+ //
+ // Types that are assignable to Evaluate:
+ //
+ // *AlertPolicy_Condition_SqlCondition_RowCountTest_
+ // *AlertPolicy_Condition_SqlCondition_BooleanTest_
+ Evaluate isAlertPolicy_Condition_SqlCondition_Evaluate `protobuf_oneof:"evaluate"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_SqlCondition) GetSchedule() isAlertPolicy_Condition_SqlCondition_Schedule {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetMinutes() *AlertPolicy_Condition_SqlCondition_Minutes {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Minutes_); ok {
+ return x.Minutes
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetHourly() *AlertPolicy_Condition_SqlCondition_Hourly {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Hourly_); ok {
+ return x.Hourly
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetDaily() *AlertPolicy_Condition_SqlCondition_Daily {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Daily_); ok {
+ return x.Daily
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_SqlCondition) GetEvaluate() isAlertPolicy_Condition_SqlCondition_Evaluate {
+ if m != nil {
+ return m.Evaluate
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetRowCountTest() *AlertPolicy_Condition_SqlCondition_RowCountTest {
+ if x, ok := x.GetEvaluate().(*AlertPolicy_Condition_SqlCondition_RowCountTest_); ok {
+ return x.RowCountTest
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetBooleanTest() *AlertPolicy_Condition_SqlCondition_BooleanTest {
+ if x, ok := x.GetEvaluate().(*AlertPolicy_Condition_SqlCondition_BooleanTest_); ok {
+ return x.BooleanTest
+ }
+ return nil
+}
+
+type isAlertPolicy_Condition_SqlCondition_Schedule interface {
+ isAlertPolicy_Condition_SqlCondition_Schedule()
+}
+
+type AlertPolicy_Condition_SqlCondition_Minutes_ struct {
+ // Schedule the query to execute every so many minutes.
+ Minutes *AlertPolicy_Condition_SqlCondition_Minutes `protobuf:"bytes,2,opt,name=minutes,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_Hourly_ struct {
+ // Schedule the query to execute every so many hours.
+ Hourly *AlertPolicy_Condition_SqlCondition_Hourly `protobuf:"bytes,3,opt,name=hourly,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_Daily_ struct {
+ // Schedule the query to execute every so many days.
+ Daily *AlertPolicy_Condition_SqlCondition_Daily `protobuf:"bytes,4,opt,name=daily,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Minutes_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+func (*AlertPolicy_Condition_SqlCondition_Hourly_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+func (*AlertPolicy_Condition_SqlCondition_Daily_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+type isAlertPolicy_Condition_SqlCondition_Evaluate interface {
+ isAlertPolicy_Condition_SqlCondition_Evaluate()
+}
+
+type AlertPolicy_Condition_SqlCondition_RowCountTest_ struct {
+ // Test the row count against a threshold.
+ RowCountTest *AlertPolicy_Condition_SqlCondition_RowCountTest `protobuf:"bytes,5,opt,name=row_count_test,json=rowCountTest,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_BooleanTest_ struct {
+ // Test the boolean value in the indicated column.
+ BooleanTest *AlertPolicy_Condition_SqlCondition_BooleanTest `protobuf:"bytes,6,opt,name=boolean_test,json=booleanTest,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest_) isAlertPolicy_Condition_SqlCondition_Evaluate() {
+}
+
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest_) isAlertPolicy_Condition_SqlCondition_Evaluate() {
+}
+
+// Options used when forecasting the time series and testing
+// the predicted value against the threshold.
+type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The length of time into the future to forecast whether a
+ // time series will violate the threshold. If the predicted value is
+ // found to violate the threshold, and the violation is observed in all
+ // forecasts made for the configured `duration`, then the time series is
+ // considered to be failing.
+ // The forecast horizon can range from 1 hour to 60 hours.
+ ForecastHorizon *durationpb.Duration `protobuf:"bytes,1,opt,name=forecast_horizon,json=forecastHorizon,proto3" json:"forecast_horizon,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() {
+ *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricThreshold_ForecastOptions.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1, 0}
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) GetForecastHorizon() *durationpb.Duration {
+ if x != nil {
+ return x.ForecastHorizon
+ }
+ return nil
+}
+
+// Used to schedule the query to run every so many minutes.
+type AlertPolicy_Condition_SqlCondition_Minutes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Number of minutes between runs. The interval must be
+ // greater than or equal to 5 minutes and less than or equal to 1440
+ // minutes.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Minutes{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Minutes) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Minutes.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Minutes) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 0}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+// Used to schedule the query to run every so many hours.
+type AlertPolicy_Condition_SqlCondition_Hourly struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The number of hours between runs. Must be greater than or
+ // equal to 1 hour and less than or equal to 48 hours.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+ // Optional. The number of minutes after the hour (in UTC) to run the
+ // query. Must be greater than or equal to 0 minutes and less than or
+ // equal to 59 minutes. If left unspecified, then an arbitrary offset
+ // is used.
+ MinuteOffset *int32 `protobuf:"varint,2,opt,name=minute_offset,json=minuteOffset,proto3,oneof" json:"minute_offset,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Hourly{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Hourly) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Hourly.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Hourly) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 1}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) GetMinuteOffset() int32 {
+ if x != nil && x.MinuteOffset != nil {
+ return *x.MinuteOffset
+ }
+ return 0
+}
+
+// Used to schedule the query to run every so many days.
+type AlertPolicy_Condition_SqlCondition_Daily struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The number of days between runs. Must be greater than or
+ // equal to 1 day and less than or equal to 31 days.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+ // Optional. The time of day (in UTC) at which the query should run. If
+ // left unspecified, the server picks an arbitrary time of day and runs
+ // the query at the same time each day.
+ ExecutionTime *timeofday.TimeOfDay `protobuf:"bytes,2,opt,name=execution_time,json=executionTime,proto3" json:"execution_time,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Daily{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Daily) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Daily.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Daily) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 2}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) GetExecutionTime() *timeofday.TimeOfDay {
+ if x != nil {
+ return x.ExecutionTime
+ }
+ return nil
+}
+
+// A test that checks if the number of rows in the result set
+// violates some threshold.
+type AlertPolicy_Condition_SqlCondition_RowCountTest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The comparison to apply between the number of rows returned
+ // by the query and the threshold.
+ Comparison ComparisonType `protobuf:"varint,1,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // Required. The value against which to compare the row count.
+ Threshold int64 `protobuf:"varint,2,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_RowCountTest{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[19]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_RowCountTest.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 3}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) GetComparison() ComparisonType {
+ if x != nil {
+ return x.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) GetThreshold() int64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+// A test that uses an alerting result in a boolean column produced by
+// the SQL query.
+type AlertPolicy_Condition_SqlCondition_BooleanTest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the column containing the boolean value. If the
+ // value in a row is NULL, that row is ignored.
+ Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_BooleanTest{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_BooleanTest.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 4}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) GetColumn() string {
+ if x != nil {
+ return x.Column
+ }
+ return ""
+}
+
+// Control over the rate of notifications sent to this alerting policy's
+// notification channels.
+type AlertPolicy_AlertStrategy_NotificationRateLimit struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Not more than one notification per `period`.
+ Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() {
+ *x = AlertPolicy_AlertStrategy_NotificationRateLimit{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[21]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationRateLimit.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) GetPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.Period
+ }
+ return nil
+}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires, on a per-channel basis.
+type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full REST resource name for the notification channels that these
+ // settings apply to. Each of these correspond to the name field in one
+ // of the NotificationChannel objects referenced in the
+ // notification_channels field of this AlertPolicy.
+ // The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"`
+ // The frequency at which to send reminder notifications for open
+ // incidents.
+ RenotifyInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=renotify_interval,json=renotifyInterval,proto3" json:"renotify_interval,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() {
+ *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationChannelStrategy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 1}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetNotificationChannelNames() []string {
+ if x != nil {
+ return x.NotificationChannelNames
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetRenotifyInterval() *durationpb.Duration {
+ if x != nil {
+ return x.RenotifyInterval
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_alert_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x66,
+ 0x64, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x35, 0x0a, 0x0b, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61,
+ 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e,
+ 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64,
+ 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b,
+ 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a,
+ 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62,
+ 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e,
+ 0x65, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x69, 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a,
+ 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f,
+ 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x56, 0x0a, 0x0e, 0x61,
+ 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61,
+ 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74,
+ 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18,
+ 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79,
+ 0x1a, 0xf3, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6d, 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d,
+ 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x1a, 0xa5, 0x23, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70,
+ 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+ 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65,
+ 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73,
+ 0x65, 0x6e, 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12,
+ 0x9d, 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
+ 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12,
+ 0x9d, 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
+ 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12,
+ 0x5f, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x71, 0x6c,
+ 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x71, 0x6c,
+ 0x1a, 0x45, 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42,
+ 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x2d, 0x0a, 0x12, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e,
+ 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c,
+ 0x0a, 0x18, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67,
+ 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72,
+ 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10,
+ 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68,
+ 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72,
+ 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72,
+ 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61,
+ 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65,
+ 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67,
+ 0x44, 0x61, 0x74, 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63,
+ 0x61, 0x73, 0x74, 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a,
+ 0x6f, 0x6e, 0x1a, 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73,
+ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72,
+ 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1,
+ 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78,
+ 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42,
+ 0x0a, 0x14, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x1a, 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a,
+ 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73,
+ 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x85,
+ 0x04, 0x0a, 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a,
+ 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76,
+ 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c,
+ 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f,
+ 0x72, 0x75, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x19, 0x64, 0x69,
+ 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xee, 0x07, 0x0a, 0x0c, 0x53, 0x71, 0x6c, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x12, 0x5c, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x69,
+ 0x6e, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73,
+ 0x12, 0x59, 0x0a, 0x06, 0x68, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71,
+ 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x6f, 0x75, 0x72, 0x6c,
+ 0x79, 0x48, 0x00, 0x52, 0x06, 0x68, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x12, 0x56, 0x0a, 0x05, 0x64,
+ 0x61, 0x69, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x05, 0x64, 0x61,
+ 0x69, 0x6c, 0x79, 0x12, 0x6d, 0x0a, 0x0e, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x5f, 0x74, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x65,
+ 0x73, 0x74, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x65,
+ 0x73, 0x74, 0x12, 0x69, 0x0a, 0x0c, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x74, 0x65,
+ 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x48, 0x01,
+ 0x52, 0x0b, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x0a,
+ 0x07, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x69,
+ 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x1a,
+ 0x70, 0x0a, 0x06, 0x48, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79,
+ 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0c,
+ 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42,
+ 0x10, 0x0a, 0x0e, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x1a, 0x72, 0x0a, 0x05, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65,
+ 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74,
+ 0x79, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x66, 0x44, 0x61,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x7c, 0x0a, 0x0c, 0x52, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x54, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69,
+ 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e,
+ 0x12, 0x21, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x1a, 0x2a, 0x0a, 0x0b, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42,
+ 0x0a, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x65,
+ 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74,
+ 0x61, 0x12, 0x27, 0x0a, 0x23, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+ 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56,
+ 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47,
+ 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01,
+ 0x12, 0x22, 0x0a, 0x1e, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
+ 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49,
+ 0x56, 0x45, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f,
+ 0x4e, 0x4f, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
+ 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69,
+ 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72,
+ 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01,
+ 0x2a, 0x42, 0x0b, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x96,
+ 0x06, 0x0a, 0x0d, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79,
+ 0x12, 0x7d, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12,
+ 0x75, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x42, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x70,
+ 0x74, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x63,
+ 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65,
+ 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72,
+ 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xa3,
+ 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x3c,
+ 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x11,
+ 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x22, 0x51, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x12, 0x23, 0x0a, 0x1f, 0x4e, 0x4f,
+ 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x4d, 0x50,
+ 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0a, 0x0a, 0x06, 0x4f, 0x50, 0x45, 0x4e, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43,
+ 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, 0x10,
+ 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, 0x44,
+ 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x52,
+ 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, 0x76,
+ 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54,
+ 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a,
+ 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e,
+ 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
+ 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12,
+ 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72,
+ 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f,
+ 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x01,
+ 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc
+)
+
+func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_alert_proto_rawDescData
+}
+
+var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_google_monitoring_v3_alert_proto_goTypes = []any{
+ (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType
+ (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity
+ (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ (AlertPolicy_AlertStrategy_NotificationPrompt)(0), // 3: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationPrompt
+ (*AlertPolicy)(nil), // 4: google.monitoring.v3.AlertPolicy
+ (*AlertPolicy_Documentation)(nil), // 5: google.monitoring.v3.AlertPolicy.Documentation
+ (*AlertPolicy_Condition)(nil), // 6: google.monitoring.v3.AlertPolicy.Condition
+ (*AlertPolicy_AlertStrategy)(nil), // 7: google.monitoring.v3.AlertPolicy.AlertStrategy
+ nil, // 8: google.monitoring.v3.AlertPolicy.UserLabelsEntry
+ (*AlertPolicy_Documentation_Link)(nil), // 9: google.monitoring.v3.AlertPolicy.Documentation.Link
+ (*AlertPolicy_Condition_Trigger)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.Trigger
+ (*AlertPolicy_Condition_MetricThreshold)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold
+ (*AlertPolicy_Condition_MetricAbsence)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence
+ (*AlertPolicy_Condition_LogMatch)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.LogMatch
+ (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition
+ (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition
+ (*AlertPolicy_Condition_SqlCondition)(nil), // 16: google.monitoring.v3.AlertPolicy.Condition.SqlCondition
+ (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 17: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions
+ nil, // 18: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry
+ nil, // 19: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry
+ (*AlertPolicy_Condition_SqlCondition_Minutes)(nil), // 20: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Minutes
+ (*AlertPolicy_Condition_SqlCondition_Hourly)(nil), // 21: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Hourly
+ (*AlertPolicy_Condition_SqlCondition_Daily)(nil), // 22: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily
+ (*AlertPolicy_Condition_SqlCondition_RowCountTest)(nil), // 23: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest
+ (*AlertPolicy_Condition_SqlCondition_BooleanTest)(nil), // 24: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.BooleanTest
+ (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 25: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit
+ (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 26: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy
+ (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue
+ (*status.Status)(nil), // 28: google.rpc.Status
+ (*MutationRecord)(nil), // 29: google.monitoring.v3.MutationRecord
+ (*durationpb.Duration)(nil), // 30: google.protobuf.Duration
+ (*Aggregation)(nil), // 31: google.monitoring.v3.Aggregation
+ (ComparisonType)(0), // 32: google.monitoring.v3.ComparisonType
+ (*timeofday.TimeOfDay)(nil), // 33: google.type.TimeOfDay
+}
+var file_google_monitoring_v3_alert_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation
+ 8, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry
+ 6, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition
+ 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType
+ 27, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue
+ 28, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status
+ 29, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord
+ 29, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord
+ 7, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy
+ 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity
+ 9, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link
+ 11, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold
+ 12, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence
+ 13, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch
+ 14, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition
+ 15, // 15: google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition
+ 16, // 16: google.monitoring.v3.AlertPolicy.Condition.condition_sql:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition
+ 25, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit
+ 3, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_prompts:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationPrompt
+ 30, // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration
+ 26, // 20: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy
+ 31, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation
+ 31, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation
+ 17, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions
+ 32, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType
+ 30, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration
+ 10, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 2, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ 31, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation
+ 30, // 29: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration
+ 10, // 30: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 18, // 31: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry
+ 30, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration
+ 10, // 33: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 2, // 34: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ 30, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration
+ 30, // 36: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration
+ 19, // 37: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry
+ 20, // 38: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.minutes:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Minutes
+ 21, // 39: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.hourly:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Hourly
+ 22, // 40: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.daily:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily
+ 23, // 41: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.row_count_test:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest
+ 24, // 42: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.boolean_test:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.BooleanTest
+ 30, // 43: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration
+ 33, // 44: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily.execution_time:type_name -> google.type.TimeOfDay
+ 32, // 45: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest.comparison:type_name -> google.monitoring.v3.ComparisonType
+ 30, // 46: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration
+ 30, // 47: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> google.protobuf.Duration
+ 48, // [48:48] is the sub-list for method output_type
+ 48, // [48:48] is the sub-list for method input_type
+ 48, // [48:48] is the sub-list for extension type_name
+ 48, // [48:48] is the sub-list for extension extendee
+ 0, // [0:48] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_alert_proto_init() }
+func file_google_monitoring_v3_alert_proto_init() {
+ if File_google_monitoring_v3_alert_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_mutation_record_proto_init()
+ file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{
+ (*AlertPolicy_Condition_ConditionThreshold)(nil),
+ (*AlertPolicy_Condition_ConditionAbsent)(nil),
+ (*AlertPolicy_Condition_ConditionMatchedLog)(nil),
+ (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil),
+ (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil),
+ (*AlertPolicy_Condition_ConditionSql)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{
+ (*AlertPolicy_Condition_Trigger_Count)(nil),
+ (*AlertPolicy_Condition_Trigger_Percent)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[12].OneofWrappers = []any{
+ (*AlertPolicy_Condition_SqlCondition_Minutes_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_Hourly_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_Daily_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_RowCountTest_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_BooleanTest_)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[17].OneofWrappers = []any{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_alert_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_alert_proto = out.File
+ file_google_monitoring_v3_alert_proto_rawDesc = nil
+ file_google_monitoring_v3_alert_proto_goTypes = nil
+ file_google_monitoring_v3_alert_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
new file mode 100644
index 000000000..ba0c4f65f
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
@@ -0,0 +1,961 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/alert_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The protocol for the `CreateAlertPolicy` request.
+type CreateAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the alerting policy. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policy will be written, not the name of the created policy. |name| must be
+ // a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will
+ // return. The alerting policy that is returned will have a name that contains
+ // a normalized representation of this name as a prefix but adds a suffix of
+ // the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the
+ // container.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The requested alerting policy. You should omit the `name` field
+ // in this policy. The name will be returned in the new policy, including a
+ // new `[ALERT_POLICY_ID]` value.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+}
+
+func (x *CreateAlertPolicyRequest) Reset() {
+ *x = CreateAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateAlertPolicyRequest) ProtoMessage() {}
+
+func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if x != nil {
+ return x.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `GetAlertPolicy` request.
+type GetAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The alerting policy to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetAlertPolicyRequest) Reset() {
+ *x = GetAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAlertPolicyRequest) ProtoMessage() {}
+
+func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` request.
+type ListAlertPoliciesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // alert policies are to be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policies to be listed are stored. To retrieve a single alerting policy
+ // by name, use the
+ // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If provided, this field specifies the criteria that must be met
+ // by alert policies to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A comma-separated list of fields by which to sort the result.
+ // Supports the same set of field references as the `filter` field. Entries
+ // can be prefixed with a minus sign to sort by the field in descending order.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Optional. The maximum number of results to return in a single response.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If this field is not empty then it must contain the
+ // `nextPageToken` value returned by a previous call to this method. Using
+ // this field causes the method to return more results from the previous
+ // method call.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListAlertPoliciesRequest) Reset() {
+ *x = ListAlertPoliciesRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListAlertPoliciesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListAlertPoliciesRequest) ProtoMessage() {}
+
+func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead.
+func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListAlertPoliciesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListAlertPoliciesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` response.
+type ListAlertPoliciesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned alert policies.
+ AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"`
+ // If there might be more results than were returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of alert policies in all pages. This number is only an
+ // estimate, and may change in subsequent pages. https://aip.dev/158
+ TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListAlertPoliciesResponse) Reset() {
+ *x = ListAlertPoliciesResponse{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListAlertPoliciesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListAlertPoliciesResponse) ProtoMessage() {}
+
+func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead.
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
+ if x != nil {
+ return x.AlertPolicies
+ }
+ return nil
+}
+
+func (x *ListAlertPoliciesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The protocol for the `UpdateAlertPolicy` request.
+type UpdateAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. A list of alerting policy field names. If this field is not
+ // empty, each listed field in the existing alerting policy is set to the
+ // value of the corresponding field in the supplied policy (`alert_policy`),
+ // or to the field's default value if the field is not in the supplied
+ // alerting policy. Fields not listed retain their previous value.
+ //
+ // Examples of valid field masks include `display_name`, `documentation`,
+ // `documentation.content`, `documentation.mime_type`, `user_labels`,
+ // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
+ //
+ // If this field is empty, then the supplied alerting policy replaces the
+ // existing policy. It is the same as deleting the existing policy and
+ // adding the supplied policy, except for the following:
+ //
+ // - The new policy will have the same `[ALERT_POLICY_ID]` as the former
+ // policy. This gives you continuity with the former policy in your
+ // notifications and incidents.
+ // - Conditions in the new policy will keep their former `[CONDITION_ID]` if
+ // the supplied condition includes the `name` field with that
+ // `[CONDITION_ID]`. If the supplied condition omits the `name` field,
+ // then a new `[CONDITION_ID]` is created.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. The updated alerting policy or the updated values for the
+ // fields listed in `update_mask`.
+ // If `update_mask` is not empty, any fields in this policy that are
+ // not in `update_mask` are ignored.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+}
+
+func (x *UpdateAlertPolicyRequest) Reset() {
+ *x = UpdateAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateAlertPolicyRequest) ProtoMessage() {}
+
+func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if x != nil {
+ return x.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `DeleteAlertPolicy` request.
+type DeleteAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The alerting policy to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteAlertPolicyRequest) Reset() {
+ *x = DeleteAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteAlertPolicyRequest) ProtoMessage() {}
+
+func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22,
+ 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x18,
+ 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65,
+ 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac,
+ 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e,
+ 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01,
+ 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c,
+ 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01,
+ 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e,
+ 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01,
+ 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x63, 0xda, 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c,
+ 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41,
+ 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f,
+ 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
+ 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_alert_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_google_monitoring_v3_alert_service_proto_goTypes = []any{
+ (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest
+ (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest
+ (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest
+ (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse
+ (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest
+ (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest
+ (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy
+ (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask
+ (*emptypb.Empty)(nil), // 8: google.protobuf.Empty
+}
+var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{
+ 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
+ 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy
+ 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
+ 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest
+ 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest
+ 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest
+ 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest
+ 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest
+ 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse
+ 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty
+ 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 9, // [9:14] is the sub-list for method output_type
+ 4, // [4:9] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_alert_service_proto_init() }
+func file_google_monitoring_v3_alert_service_proto_init() {
+ if File_google_monitoring_v3_alert_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_alert_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_alert_service_proto = out.File
+ file_google_monitoring_v3_alert_service_proto_rawDesc = nil
+ file_google_monitoring_v3_alert_service_proto_goTypes = nil
+ file_google_monitoring_v3_alert_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// AlertPolicyServiceClient is the client API for AlertPolicyService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AlertPolicyServiceClient interface {
+ // Lists the existing alerting policies for the workspace.
+ ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+}
+
+type alertPolicyServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient {
+ return &alertPolicyServiceClient{cc}
+}
+
+func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
+ out := new(ListAlertPoliciesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AlertPolicyServiceServer is the server API for AlertPolicyService service.
+type AlertPolicyServiceServer interface {
+ // Lists the existing alerting policies for the workspace.
+ ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
+}
+
+// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedAlertPolicyServiceServer struct {
+}
+
+func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented")
+}
+
+func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
+ s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
+}
+
+func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListAlertPoliciesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.AlertPolicyService",
+ HandlerType: (*AlertPolicyServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListAlertPolicies",
+ Handler: _AlertPolicyService_ListAlertPolicies_Handler,
+ },
+ {
+ MethodName: "GetAlertPolicy",
+ Handler: _AlertPolicyService_GetAlertPolicy_Handler,
+ },
+ {
+ MethodName: "CreateAlertPolicy",
+ Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
+ },
+ {
+ MethodName: "DeleteAlertPolicy",
+ Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
+ },
+ {
+ MethodName: "UpdateAlertPolicy",
+ Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/alert_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
new file mode 100644
index 000000000..81b8c8f5e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
@@ -0,0 +1,1121 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/common.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ distribution "google.golang.org/genproto/googleapis/api/distribution"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies an ordering relationship on two arguments, called `left` and
+// `right`.
+type ComparisonType int32
+
+const (
+ // No ordering relationship is specified.
+ ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0
+ // True if the left argument is greater than the right argument.
+ ComparisonType_COMPARISON_GT ComparisonType = 1
+ // True if the left argument is greater than or equal to the right argument.
+ ComparisonType_COMPARISON_GE ComparisonType = 2
+ // True if the left argument is less than the right argument.
+ ComparisonType_COMPARISON_LT ComparisonType = 3
+ // True if the left argument is less than or equal to the right argument.
+ ComparisonType_COMPARISON_LE ComparisonType = 4
+ // True if the left argument is equal to the right argument.
+ ComparisonType_COMPARISON_EQ ComparisonType = 5
+ // True if the left argument is not equal to the right argument.
+ ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+// Enum value maps for ComparisonType.
+var (
+ ComparisonType_name = map[int32]string{
+ 0: "COMPARISON_UNSPECIFIED",
+ 1: "COMPARISON_GT",
+ 2: "COMPARISON_GE",
+ 3: "COMPARISON_LT",
+ 4: "COMPARISON_LE",
+ 5: "COMPARISON_EQ",
+ 6: "COMPARISON_NE",
+ }
+ ComparisonType_value = map[string]int32{
+ "COMPARISON_UNSPECIFIED": 0,
+ "COMPARISON_GT": 1,
+ "COMPARISON_GE": 2,
+ "COMPARISON_LT": 3,
+ "COMPARISON_LE": 4,
+ "COMPARISON_EQ": 5,
+ "COMPARISON_NE": 6,
+ }
+)
+
+func (x ComparisonType) Enum() *ComparisonType {
+ p := new(ComparisonType)
+ *p = x
+ return p
+}
+
+func (x ComparisonType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ComparisonType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor()
+}
+
+func (ComparisonType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[0]
+}
+
+func (x ComparisonType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ComparisonType.Descriptor instead.
+func (ComparisonType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}
+}
+
+// The tier of service for a Metrics Scope. Please see the
+// [service tiers
+// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more
+// details.
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/common.proto.
+type ServiceTier int32
+
+const (
+ // An invalid sentinel value, used to indicate that a tier has not
+ // been provided explicitly.
+ ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+ // The Cloud Monitoring Basic tier, a free tier of service that provides basic
+ // features, a moderate allotment of logs, and access to built-in metrics.
+ // A number of features are not available in this tier. For more details,
+ // see [the service tiers
+ // documentation](https://cloud.google.com/monitoring/workspaces/tiers).
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+ // The Cloud Monitoring Premium tier, a higher, more expensive tier of service
+ // that provides access to all Cloud Monitoring features, lets you use Cloud
+ // Monitoring with AWS accounts, and has a larger allotments for logs and
+ // metrics. For more details, see [the service tiers
+ // documentation](https://cloud.google.com/monitoring/workspaces/tiers).
+ ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+// Enum value maps for ServiceTier.
+var (
+ ServiceTier_name = map[int32]string{
+ 0: "SERVICE_TIER_UNSPECIFIED",
+ 1: "SERVICE_TIER_BASIC",
+ 2: "SERVICE_TIER_PREMIUM",
+ }
+ ServiceTier_value = map[string]int32{
+ "SERVICE_TIER_UNSPECIFIED": 0,
+ "SERVICE_TIER_BASIC": 1,
+ "SERVICE_TIER_PREMIUM": 2,
+ }
+)
+
+func (x ServiceTier) Enum() *ServiceTier {
+ p := new(ServiceTier)
+ *p = x
+ return p
+}
+
+func (x ServiceTier) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceTier) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor()
+}
+
+func (ServiceTier) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[1]
+}
+
+func (x ServiceTier) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceTier.Descriptor instead.
+func (ServiceTier) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1}
+}
+
+// The `Aligner` specifies the operation that will be applied to the data
+// points in each alignment period in a time series. Except for
+// `ALIGN_NONE`, which specifies that no operation be applied, each alignment
+// operation replaces the set of data values in each alignment period with
+// a single value: the result of applying the operation to the data values.
+// An aligned time series has a single data value at the end of each
+// `alignment_period`.
+//
+// An alignment operation can change the data type of the values, too. For
+// example, if you apply a counting operation to boolean values, the data
+// `value_type` in the original time series is `BOOLEAN`, but the `value_type`
+// in the aligned result is `INT64`.
+type Aggregation_Aligner int32
+
+const (
+ // No alignment. Raw data is returned. Not valid if cross-series reduction
+ // is requested. The `value_type` of the result is the same as the
+ // `value_type` of the input.
+ Aggregation_ALIGN_NONE Aggregation_Aligner = 0
+ // Align and convert to
+ // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA].
+ // The output is `delta = y1 - y0`.
+ //
+ // This alignment is valid for
+ // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and
+ // `DELTA` metrics. If the selected alignment period results in periods
+ // with no data, then the aligned value for such a period is created by
+ // interpolation. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
+ // Align and convert to a rate. The result is computed as
+ // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time".
+ // Think of this aligner as providing the slope of the line that passes
+ // through the value at the start and at the end of the `alignment_period`.
+ //
+ // This aligner is valid for `CUMULATIVE`
+ // and `DELTA` metrics with numeric values. If the selected alignment
+ // period results in periods with no data, then the aligned value for
+ // such a period is created by interpolation. The output is a `GAUGE`
+ // metric with `value_type` `DOUBLE`.
+ //
+ // If, by "rate", you mean "percentage change", see the
+ // `ALIGN_PERCENT_CHANGE` aligner instead.
+ Aggregation_ALIGN_RATE Aggregation_Aligner = 2
+ // Align by interpolating between adjacent points around the alignment
+ // period boundary. This aligner is valid for `GAUGE` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as the
+ // `value_type` of the input.
+ Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3
+ // Align by moving the most recent data point before the end of the
+ // alignment period to the boundary at the end of the alignment
+ // period. This aligner is valid for `GAUGE` metrics. The `value_type` of
+ // the aligned result is the same as the `value_type` of the input.
+ Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4
+ // Align the time series by returning the minimum value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_MIN Aggregation_Aligner = 10
+ // Align the time series by returning the maximum value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_MAX Aggregation_Aligner = 11
+ // Align the time series by returning the mean value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is `DOUBLE`.
+ Aggregation_ALIGN_MEAN Aggregation_Aligner = 12
+ // Align the time series by returning the number of values in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric or Boolean values. The `value_type` of the aligned result is
+ // `INT64`.
+ Aggregation_ALIGN_COUNT Aggregation_Aligner = 13
+ // Align the time series by returning the sum of the values in each
+ // alignment period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with numeric and distribution values. The `value_type` of the
+ // aligned result is the same as the `value_type` of the input.
+ Aggregation_ALIGN_SUM Aggregation_Aligner = 14
+ // Align the time series by returning the standard deviation of the values
+ // in each alignment period. This aligner is valid for `GAUGE` and
+ // `DELTA` metrics with numeric values. The `value_type` of the output is
+ // `DOUBLE`.
+ Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15
+ // Align the time series by returning the number of `True` values in
+ // each alignment period. This aligner is valid for `GAUGE` metrics with
+ // Boolean values. The `value_type` of the output is `INT64`.
+ Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
+ // Align the time series by returning the number of `False` values in
+ // each alignment period. This aligner is valid for `GAUGE` metrics with
+ // Boolean values. The `value_type` of the output is `INT64`.
+ Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
+ // Align the time series by returning the ratio of the number of `True`
+ // values to the total number of values in each alignment period. This
+ // aligner is valid for `GAUGE` metrics with Boolean values. The output
+ // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`.
+ Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 99th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 95th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 50th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 5th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
+ // Align and convert to a percentage change. This aligner is valid for
+ // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns
+ // `((current - previous)/previous) * 100`, where the value of `previous` is
+ // determined based on the `alignment_period`.
+ //
+ // If the values of `current` and `previous` are both 0, then the returned
+ // value is 0. If only `previous` is 0, the returned value is infinity.
+ //
+ // A 10-minute moving mean is computed at each point of the alignment period
+ // prior to the above calculation to smooth the metric and prevent false
+ // positives from very short-lived spikes. The moving mean is only
+ // applicable for data whose values are `>= 0`. Any values `< 0` are
+ // treated as a missing datapoint, and are ignored. While `DELTA`
+ // metrics are accepted by this alignment, special care should be taken that
+ // the values for the metric will always be positive. The output is a
+ // `GAUGE` metric with `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23
+)
+
+// Enum value maps for Aggregation_Aligner.
+var (
+ Aggregation_Aligner_name = map[int32]string{
+ 0: "ALIGN_NONE",
+ 1: "ALIGN_DELTA",
+ 2: "ALIGN_RATE",
+ 3: "ALIGN_INTERPOLATE",
+ 4: "ALIGN_NEXT_OLDER",
+ 10: "ALIGN_MIN",
+ 11: "ALIGN_MAX",
+ 12: "ALIGN_MEAN",
+ 13: "ALIGN_COUNT",
+ 14: "ALIGN_SUM",
+ 15: "ALIGN_STDDEV",
+ 16: "ALIGN_COUNT_TRUE",
+ 24: "ALIGN_COUNT_FALSE",
+ 17: "ALIGN_FRACTION_TRUE",
+ 18: "ALIGN_PERCENTILE_99",
+ 19: "ALIGN_PERCENTILE_95",
+ 20: "ALIGN_PERCENTILE_50",
+ 21: "ALIGN_PERCENTILE_05",
+ 23: "ALIGN_PERCENT_CHANGE",
+ }
+ Aggregation_Aligner_value = map[string]int32{
+ "ALIGN_NONE": 0,
+ "ALIGN_DELTA": 1,
+ "ALIGN_RATE": 2,
+ "ALIGN_INTERPOLATE": 3,
+ "ALIGN_NEXT_OLDER": 4,
+ "ALIGN_MIN": 10,
+ "ALIGN_MAX": 11,
+ "ALIGN_MEAN": 12,
+ "ALIGN_COUNT": 13,
+ "ALIGN_SUM": 14,
+ "ALIGN_STDDEV": 15,
+ "ALIGN_COUNT_TRUE": 16,
+ "ALIGN_COUNT_FALSE": 24,
+ "ALIGN_FRACTION_TRUE": 17,
+ "ALIGN_PERCENTILE_99": 18,
+ "ALIGN_PERCENTILE_95": 19,
+ "ALIGN_PERCENTILE_50": 20,
+ "ALIGN_PERCENTILE_05": 21,
+ "ALIGN_PERCENT_CHANGE": 23,
+ }
+)
+
+func (x Aggregation_Aligner) Enum() *Aggregation_Aligner {
+ p := new(Aggregation_Aligner)
+ *p = x
+ return p
+}
+
+func (x Aggregation_Aligner) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor()
+}
+
+func (Aggregation_Aligner) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[2]
+}
+
+func (x Aggregation_Aligner) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Aggregation_Aligner.Descriptor instead.
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// A Reducer operation describes how to aggregate data points from multiple
+// time series into a single time series, where the value of each data point
+// in the resulting series is a function of all the already aligned values in
+// the input time series.
+type Aggregation_Reducer int32
+
+const (
+ // No cross-time series reduction. The output of the `Aligner` is
+ // returned.
+ Aggregation_REDUCE_NONE Aggregation_Reducer = 0
+ // Reduce by computing the mean value across time series for each
+ // alignment period. This reducer is valid for
+ // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and
+ // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with
+ // numeric or distribution values. The `value_type` of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_REDUCE_MEAN Aggregation_Reducer = 1
+ // Reduce by computing the minimum value across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric values. The `value_type` of the output is the same as the
+ // `value_type` of the input.
+ Aggregation_REDUCE_MIN Aggregation_Reducer = 2
+ // Reduce by computing the maximum value across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric values. The `value_type` of the output is the same as the
+ // `value_type` of the input.
+ Aggregation_REDUCE_MAX Aggregation_Reducer = 3
+ // Reduce by computing the sum across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric and distribution values. The `value_type` of the output is
+ // the same as the `value_type` of the input.
+ Aggregation_REDUCE_SUM Aggregation_Reducer = 4
+ // Reduce by computing the standard deviation across time series
+ // for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics with numeric or distribution values. The `value_type`
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5
+ // Reduce by computing the number of data points across time series
+ // for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of numeric, Boolean, distribution, and string
+ // `value_type`. The `value_type` of the output is `INT64`.
+ Aggregation_REDUCE_COUNT Aggregation_Reducer = 6
+ // Reduce by computing the number of `True`-valued data points across time
+ // series for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output
+ // is `INT64`.
+ Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7
+ // Reduce by computing the number of `False`-valued data points across time
+ // series for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output
+ // is `INT64`.
+ Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15
+ // Reduce by computing the ratio of the number of `True`-valued data points
+ // to the total number of data points for each alignment period. This
+ // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`.
+ // The output value is in the range [0.0, 1.0] and has `value_type`
+ // `DOUBLE`.
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8
+ // Reduce by computing the [99th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9
+ // Reduce by computing the [95th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10
+ // Reduce by computing the [50th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11
+ // Reduce by computing the [5th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12
+)
+
+// Enum value maps for Aggregation_Reducer.
+var (
+ Aggregation_Reducer_name = map[int32]string{
+ 0: "REDUCE_NONE",
+ 1: "REDUCE_MEAN",
+ 2: "REDUCE_MIN",
+ 3: "REDUCE_MAX",
+ 4: "REDUCE_SUM",
+ 5: "REDUCE_STDDEV",
+ 6: "REDUCE_COUNT",
+ 7: "REDUCE_COUNT_TRUE",
+ 15: "REDUCE_COUNT_FALSE",
+ 8: "REDUCE_FRACTION_TRUE",
+ 9: "REDUCE_PERCENTILE_99",
+ 10: "REDUCE_PERCENTILE_95",
+ 11: "REDUCE_PERCENTILE_50",
+ 12: "REDUCE_PERCENTILE_05",
+ }
+ Aggregation_Reducer_value = map[string]int32{
+ "REDUCE_NONE": 0,
+ "REDUCE_MEAN": 1,
+ "REDUCE_MIN": 2,
+ "REDUCE_MAX": 3,
+ "REDUCE_SUM": 4,
+ "REDUCE_STDDEV": 5,
+ "REDUCE_COUNT": 6,
+ "REDUCE_COUNT_TRUE": 7,
+ "REDUCE_COUNT_FALSE": 15,
+ "REDUCE_FRACTION_TRUE": 8,
+ "REDUCE_PERCENTILE_99": 9,
+ "REDUCE_PERCENTILE_95": 10,
+ "REDUCE_PERCENTILE_50": 11,
+ "REDUCE_PERCENTILE_05": 12,
+ }
+)
+
+func (x Aggregation_Reducer) Enum() *Aggregation_Reducer {
+ p := new(Aggregation_Reducer)
+ *p = x
+ return p
+}
+
+func (x Aggregation_Reducer) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor()
+}
+
+func (Aggregation_Reducer) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[3]
+}
+
+func (x Aggregation_Reducer) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Aggregation_Reducer.Descriptor instead.
+func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1}
+}
+
+// A single strongly-typed value.
+type TypedValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The typed value field.
+ //
+ // Types that are assignable to Value:
+ //
+ // *TypedValue_BoolValue
+ // *TypedValue_Int64Value
+ // *TypedValue_DoubleValue
+ // *TypedValue_StringValue
+ // *TypedValue_DistributionValue
+ Value isTypedValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *TypedValue) Reset() {
+ *x = TypedValue{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TypedValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedValue) ProtoMessage() {}
+
+func (x *TypedValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead.
+func (*TypedValue) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *TypedValue) GetValue() isTypedValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *TypedValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*TypedValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *TypedValue) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*TypedValue_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *TypedValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *TypedValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*TypedValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *TypedValue) GetDistributionValue() *distribution.Distribution {
+ if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+type isTypedValue_Value interface {
+ isTypedValue_Value()
+}
+
+type TypedValue_BoolValue struct {
+ // A Boolean value: `true` or `false`.
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type TypedValue_Int64Value struct {
+ // A 64-bit integer. Its range is approximately ±9.2x1018.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type TypedValue_DoubleValue struct {
+ // A 64-bit double-precision floating-point number. Its magnitude
+ // is approximately ±10±300 and it has 16
+ // significant digits of precision.
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type TypedValue_StringValue struct {
+ // A variable-length string value.
+ StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type TypedValue_DistributionValue struct {
+ // A distribution value.
+ DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+func (*TypedValue_BoolValue) isTypedValue_Value() {}
+
+func (*TypedValue_Int64Value) isTypedValue_Value() {}
+
+func (*TypedValue_DoubleValue) isTypedValue_Value() {}
+
+func (*TypedValue_StringValue) isTypedValue_Value() {}
+
+func (*TypedValue_DistributionValue) isTypedValue_Value() {}
+
+// Describes a time interval:
+//
+// - Reads: A half-open time interval. It includes the end time but
+// excludes the start time: `(startTime, endTime]`. The start time
+// must be specified, must be earlier than the end time, and should be
+// no older than the data retention period for the metric.
+// - Writes: A closed time interval. It extends from the start time to the end
+// time,
+// and includes both: `[startTime, endTime]`. Valid time intervals
+// depend on the
+// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind)
+// of the metric value. The end time must not be earlier than the start
+// time, and the end time must not be more than 25 hours in the past or more
+// than five minutes in the future.
+// - For `GAUGE` metrics, the `startTime` value is technically optional; if
+// no value is specified, the start time defaults to the value of the
+// end time, and the interval represents a single point in time. If both
+// start and end times are specified, they must be identical. Such an
+// interval is valid only for `GAUGE` metrics, which are point-in-time
+// measurements. The end time of a new interval must be at least a
+// millisecond after the end time of the previous interval.
+// - For `DELTA` metrics, the start time and end time must specify a
+// non-zero interval, with subsequent points specifying contiguous and
+// non-overlapping intervals. For `DELTA` metrics, the start time of
+// the next interval must be at least a millisecond after the end time
+// of the previous interval.
+// - For `CUMULATIVE` metrics, the start time and end time must specify a
+// non-zero interval, with subsequent points specifying the same
+// start time and increasing end times, until an event resets the
+// cumulative value to zero and sets a new start time for the following
+// points. The new start time must be at least a millisecond after the
+// end time of the previous interval.
+// - The start time of a new interval must be at least a millisecond after
+// the
+// end time of the previous interval because intervals are closed. If the
+// start time of a new interval is the same as the end time of the
+// previous interval, then data written at the new start time could
+// overwrite data written at the previous end time.
+type TimeInterval struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The end of the time interval.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // Optional. The beginning of the time interval. The default value
+ // for the start time is the end time. The start time must not be
+ // later than the end time.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+}
+
+func (x *TimeInterval) Reset() {
+ *x = TimeInterval{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeInterval) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeInterval) ProtoMessage() {}
+
+func (x *TimeInterval) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead.
+func (*TimeInterval) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+// Describes how to combine multiple time series to provide a different view of
+// the data. Aggregation of time series is done in two steps. First, each time
+// series in the set is _aligned_ to the same time interval boundaries, then the
+// set of time series is optionally _reduced_ in number.
+//
+// Alignment consists of applying the `per_series_aligner` operation
+// to each time series after its data has been divided into regular
+// `alignment_period` time intervals. This process takes _all_ of the data
+// points in an alignment period, applies a mathematical transformation such as
+// averaging, minimum, maximum, delta, etc., and converts them into a single
+// data point per period.
+//
+// Reduction is when the aligned and transformed time series can optionally be
+// combined, reducing the number of time series through similar mathematical
+// transformations. Reduction involves applying a `cross_series_reducer` to
+// all the time series, optionally sorting the time series into subsets with
+// `group_by_fields`, and applying the reducer to each subset.
+//
+// The raw time series data can contain a huge amount of information from
+// multiple sources. Alignment and reduction transforms this mass of data into
+// a more manageable and representative collection of data, for example "the
+// 95% latency across the average of all tasks in a cluster". This
+// representative data can be more easily graphed and comprehended, and the
+// individual time series data is still available for later drilldown. For more
+// details, see [Filtering and
+// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation).
+type Aggregation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `alignment_period` specifies a time interval, in seconds, that is used
+ // to divide the data in all the
+ // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of
+ // time. This will be done before the per-series aligner can be applied to
+ // the data.
+ //
+ // The value must be at least 60 seconds. If a per-series
+ // aligner other than `ALIGN_NONE` is specified, this field is required or an
+ // error is returned. If no per-series aligner is specified, or the aligner
+ // `ALIGN_NONE` is specified, then this field is ignored.
+ //
+ // The maximum value of the `alignment_period` is 104 weeks (2 years) for
+ // charts, and 90,000 seconds (25 hours) for alerting policies.
+ AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"`
+ // An `Aligner` describes how to bring the data points in a single
+ // time series into temporal alignment. Except for `ALIGN_NONE`, all
+ // alignments cause all the data points in an `alignment_period` to be
+ // mathematically grouped together, resulting in a single data point for
+ // each `alignment_period` with end timestamp at the end of the period.
+ //
+ // Not all alignment operations may be applied to all time series. The valid
+ // choices depend on the `metric_kind` and `value_type` of the original time
+ // series. Alignment can change the `metric_kind` or the `value_type` of
+ // the time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `cross_series_reducer` is specified, then
+ // `per_series_aligner` must be specified and not equal to `ALIGN_NONE`
+ // and `alignment_period` must be specified; otherwise, an error is
+ // returned.
+ PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"`
+ // The reduction operation to be used to combine time series into a single
+ // time series, where the value of each data point in the resulting series is
+ // a function of all the already aligned values in the input time series.
+ //
+ // Not all reducer operations can be applied to all time series. The valid
+ // choices depend on the `metric_kind` and the `value_type` of the original
+ // time series. Reduction can yield a time series with a different
+ // `metric_kind` or `value_type` than the input time series.
+ //
+ // Time series data must first be aligned (see `per_series_aligner`) in order
+ // to perform cross-time series reduction. If `cross_series_reducer` is
+ // specified, then `per_series_aligner` must be specified, and must not be
+ // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an
+ // error is returned.
+ CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"`
+ // The set of fields to preserve when `cross_series_reducer` is
+ // specified. The `group_by_fields` determine how the time series are
+ // partitioned into subsets prior to applying the aggregation
+ // operation. Each subset contains time series that have the same
+ // value for each of the grouping fields. Each individual time
+ // series is a member of exactly one subset. The
+ // `cross_series_reducer` is applied to each subset of time series.
+ // It is not possible to reduce across different resource types, so
+ // this field implicitly contains `resource.type`. Fields not
+ // specified in `group_by_fields` are aggregated away. If
+ // `group_by_fields` is not specified and all the time series have
+ // the same resource type, then the time series are aggregated into
+ // a single output time series. If `cross_series_reducer` is not
+ // defined, this field is ignored.
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"`
+}
+
+func (x *Aggregation) Reset() {
+ *x = Aggregation{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Aggregation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Aggregation) ProtoMessage() {}
+
+func (x *Aggregation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead.
+func (*Aggregation) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.AlignmentPeriod
+ }
+ return nil
+}
+
+func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner {
+ if x != nil {
+ return x.PerSeriesAligner
+ }
+ return Aggregation_ALIGN_NONE
+}
+
+func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer {
+ if x != nil {
+ return x.CrossSeriesReducer
+ }
+ return Aggregation_REDUCE_NONE
+}
+
+func (x *Aggregation) GetGroupByFields() []string {
+ if x != nil {
+ return x.GroupByFields
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_common_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79,
+ 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09,
+ 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
+ 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54,
+ 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65,
+ 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07,
+ 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a,
+ 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14,
+ 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64,
+ 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65,
+ 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15,
+ 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c,
+ 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e,
+ 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41,
+ 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41,
+ 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10,
+ 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54,
+ 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47,
+ 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10,
+ 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45,
+ 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39,
+ 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52,
+ 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13,
+ 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45,
+ 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50,
+ 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22,
+ 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52,
+ 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a,
+ 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05,
+ 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54,
+ 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55,
+ 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44,
+ 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10,
+ 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52,
+ 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45,
+ 0x5f, 0x39, 0x39, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f,
+ 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12,
+ 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e,
+ 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44,
+ 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30,
+ 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73,
+ 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52,
+ 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e,
+ 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49,
+ 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50,
+ 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43,
+ 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11,
+ 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10,
+ 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f,
+ 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54,
+ 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54,
+ 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45,
+ 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52,
+ 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55,
+ 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_common_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_common_proto_rawDescData = file_google_monitoring_v3_common_proto_rawDesc
+)
+
+func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_common_proto_rawDescData
+}
+
+var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_google_monitoring_v3_common_proto_goTypes = []any{
+ (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType
+ (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier
+ (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner
+ (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer
+ (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue
+ (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval
+ (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation
+ (*distribution.Distribution)(nil), // 7: google.api.Distribution
+ (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 9: google.protobuf.Duration
+}
+var file_google_monitoring_v3_common_proto_depIdxs = []int32{
+ 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution
+ 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp
+ 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp
+ 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration
+ 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner
+ 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_common_proto_init() }
+func file_google_monitoring_v3_common_proto_init() {
+ if File_google_monitoring_v3_common_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{
+ (*TypedValue_BoolValue)(nil),
+ (*TypedValue_Int64Value)(nil),
+ (*TypedValue_DoubleValue)(nil),
+ (*TypedValue_StringValue)(nil),
+ (*TypedValue_DistributionValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_common_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_common_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_common_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_common_proto = out.File
+ file_google_monitoring_v3_common_proto_rawDesc = nil
+ file_google_monitoring_v3_common_proto_goTypes = nil
+ file_google_monitoring_v3_common_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
new file mode 100644
index 000000000..0c3ac5a1c
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
@@ -0,0 +1,181 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/dropped_labels.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A set of (label, value) pairs that were removed from a Distribution
+// time series during aggregation and then added as an attachment to a
+// Distribution.Exemplar.
+//
+// The full label set for the exemplars is constructed by using the dropped
+// pairs in combination with the label values that remain on the aggregated
+// Distribution time series. The constructed full label set can be used to
+// identify the specific entity, such as the instance or job, which might be
+// contributing to a long-tail. However, with dropped labels, the storage
+// requirements are reduced because only the aggregated distribution values for
+// a large group of time series are stored.
+//
+// Note that there are no guarantees on ordering of the labels from
+// exemplar-to-exemplar and from distribution-to-distribution in the same
+// stream, and there may be duplicates. It is up to clients to resolve any
+// ambiguities.
+type DroppedLabels struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Map from label to its value, for all labels dropped in any aggregation.
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *DroppedLabels) Reset() {
+ *x = DroppedLabels{}
+ mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DroppedLabels) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DroppedLabels) ProtoMessage() {}
+
+func (x *DroppedLabels) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead.
+func (*DroppedLabels) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DroppedLabels) GetLabel() map[string]string {
+ if x != nil {
+ return x.Label
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc
+)
+
+func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_dropped_labels_proto_rawDescData
+}
+
+var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{
+ (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels
+ nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry
+}
+var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_dropped_labels_proto_init() }
+func file_google_monitoring_v3_dropped_labels_proto_init() {
+ if File_google_monitoring_v3_dropped_labels_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_dropped_labels_proto = out.File
+ file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil
+ file_google_monitoring_v3_dropped_labels_proto_goTypes = nil
+ file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
new file mode 100644
index 000000000..c35046ac7
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
@@ -0,0 +1,249 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/group.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The description of a dynamic collection of monitored resources. Each group
+// has a filter that is matched against monitored resources and their associated
+// metadata. If a group's filter matches an available monitored resource, then
+// that resource is a member of that group. Groups can contain any number of
+// monitored resources, and each monitored resource can be a member of any
+// number of groups.
+//
+// Groups can be nested in parent-child hierarchies. The `parentName` field
+// identifies an optional parent for each group. If a group has a parent, then
+// the only monitored resources available to be matched by the group's filter
+// are the resources contained in the parent group. In other words, a group
+// contains the monitored resources that match its filter and the filters of all
+// the group's ancestors. A group without a parent can contain any monitored
+// resource.
+//
+// For example, consider an infrastructure running a set of instances with two
+// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
+// `environment="production"`. A child of that parent group has a filter,
+// `role="transcoder"`. The parent group contains all instances in the
+// production environment, regardless of their roles. The child group contains
+// instances that have the transcoder role *and* are in the production
+// environment.
+//
+// The monitored resources contained in a group can change at any moment,
+// depending on what resources exist and what filters are associated with the
+// group and its ancestors.
+type Group struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The name of this group. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // When creating a group, this field is ignored and a new name is created
+ // consisting of the project specified in the call to `CreateGroup`
+ // and a unique `[GROUP_ID]` that is generated automatically.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A user-assigned name for this group, used only for display purposes.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The name of the group's parent, if it has one. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // For groups with no parent, `parent_name` is the empty string, `""`.
+ ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
+ // The filter used to determine which monitored resources belong to this
+ // group.
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // If true, the members of this group are considered to be a cluster.
+ // The system can perform additional analysis on groups that are clusters.
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"`
+}
+
+func (x *Group) Reset() {
+ *x = Group{}
+ mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Group) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Group) ProtoMessage() {}
+
+func (x *Group) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Group.ProtoReflect.Descriptor instead.
+func (*Group) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Group) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Group) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *Group) GetParentName() string {
+ if x != nil {
+ return x.ParentName
+ }
+ return ""
+}
+
+func (x *Group) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *Group) GetIsCluster() bool {
+ if x != nil {
+ return x.IsCluster
+ }
+ return false
+}
+
+var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_group_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea,
+ 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f,
+ 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33,
+ 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_group_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc
+)
+
+func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_group_proto_rawDescData
+}
+
+var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_group_proto_goTypes = []any{
+ (*Group)(nil), // 0: google.monitoring.v3.Group
+}
+var file_google_monitoring_v3_group_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_group_proto_init() }
+func file_google_monitoring_v3_group_proto_init() {
+ if File_google_monitoring_v3_group_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_group_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_group_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_group_proto = out.File
+ file_google_monitoring_v3_group_proto_rawDesc = nil
+ file_google_monitoring_v3_group_proto_goTypes = nil
+ file_google_monitoring_v3_group_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
new file mode 100644
index 000000000..fbdf9ef54
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
@@ -0,0 +1,1205 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/group_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `ListGroup` request.
+type ListGroupsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // groups are to be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional filter consisting of a single group name. The filters limit
+ // the groups returned based on their parent-child relationship with the
+ // specified group. If no filter is specified, all groups are returned.
+ //
+ // Types that are assignable to Filter:
+ //
+ // *ListGroupsRequest_ChildrenOfGroup
+ // *ListGroupsRequest_AncestorsOfGroup
+ // *ListGroupsRequest_DescendantsOfGroup
+ Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `next_page_token` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListGroupsRequest) Reset() {
+ *x = ListGroupsRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupsRequest) ProtoMessage() {}
+
+func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead.
+func (*ListGroupsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListGroupsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (x *ListGroupsRequest) GetChildrenOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok {
+ return x.ChildrenOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetAncestorsOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok {
+ return x.AncestorsOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetDescendantsOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok {
+ return x.DescendantsOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListGroupsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+type isListGroupsRequest_Filter interface {
+ isListGroupsRequest_Filter()
+}
+
+type ListGroupsRequest_ChildrenOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns groups whose `parent_name` field contains the group
+ // name. If no groups have this parent, the results are empty.
+ ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_AncestorsOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns groups that are ancestors of the specified group.
+ // The groups are returned in order, starting with the immediate parent and
+ // ending with the most distant ancestor. If the specified group has no
+ // immediate parent, the results are empty.
+ AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_DescendantsOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns the descendants of the specified group. This is a superset of
+ // the results returned by the `children_of_group` filter, and includes
+ // children-of-children, and so forth.
+ DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"`
+}
+
+func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {}
+
+// The `ListGroups` response.
+type ListGroupsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The groups that match the specified filters.
+ Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListGroupsResponse) Reset() {
+ *x = ListGroupsResponse{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupsResponse) ProtoMessage() {}
+
+func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead.
+func (*ListGroupsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListGroupsResponse) GetGroup() []*Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *ListGroupsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetGroup` request.
+type GetGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetGroupRequest) Reset() {
+ *x = GetGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetGroupRequest) ProtoMessage() {}
+
+func (x *GetGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead.
+func (*GetGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateGroup` request.
+type CreateGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the group. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. A group definition. It is an error to define the `name` field
+ // because the system assigns the name.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not create the group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *CreateGroupRequest) Reset() {
+ *x = CreateGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateGroupRequest) ProtoMessage() {}
+
+func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead.
+func (*CreateGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateGroupRequest) GetGroup() *Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *CreateGroupRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// The `UpdateGroup` request.
+type UpdateGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The new definition of the group. All fields of the existing
+ // group, excepting `name`, are replaced with the corresponding fields of this
+ // group.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not update the existing group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *UpdateGroupRequest) Reset() {
+ *x = UpdateGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateGroupRequest) ProtoMessage() {}
+
+func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead.
+func (*UpdateGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateGroupRequest) GetGroup() *Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *UpdateGroupRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// The `DeleteGroup` request. The default behavior is to be able to delete a
+// single group without any descendants.
+type DeleteGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // If this field is true, then the request means to delete a group with all
+ // its descendants. Otherwise, the request means to delete a group only when
+ // it has no descendants. The default value is false.
+ Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"`
+}
+
+func (x *DeleteGroupRequest) Reset() {
+ *x = DeleteGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteGroupRequest) ProtoMessage() {}
+
+func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead.
+func (*DeleteGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteGroupRequest) GetRecursive() bool {
+ if x != nil {
+ return x.Recursive
+ }
+ return false
+}
+
+// The `ListGroupMembers` request.
+type ListGroupMembersRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group whose members are listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `next_page_token` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // An optional [list
+ // filter](https://cloud.google.com/monitoring/api/learn_more#filtering)
+ // describing the members to be returned. The filter may reference the type,
+ // labels, and metadata of monitored resources that comprise the group. For
+ // example, to return only resources representing Compute Engine VM instances,
+ // use this filter:
+ //
+ // `resource.type = "gce_instance"`
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // An optional time interval for which results should be returned. Only
+ // members that were part of the group during the specified interval are
+ // included in the response. If no interval is provided then the group
+ // membership over the last minute is returned.
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"`
+}
+
+func (x *ListGroupMembersRequest) Reset() {
+ *x = ListGroupMembersRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupMembersRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupMembersRequest) ProtoMessage() {}
+
+func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead.
+func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListGroupMembersRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListGroupMembersRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+// The `ListGroupMembers` response.
+type ListGroupMembersResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A set of monitored resources in the group.
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"`
+ // If there are more results than have been returned, then this field is
+ // set to a non-empty value. To see the additional results, use that value as
+ // `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of elements matching this request.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListGroupMembersResponse) Reset() {
+ *x = ListGroupMembersResponse{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupMembersResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupMembersResponse) ProtoMessage() {}
+
+func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead.
+func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource {
+ if x != nil {
+ return x.Members
+ }
+ return nil
+}
+
+func (x *ListGroupMembersResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListGroupMembersResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76,
+ 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a,
+ 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00,
+ 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f,
+ 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa,
+ 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73,
+ 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65,
+ 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64,
+ 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a,
+ 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f,
+ 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01,
+ 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65,
+ 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a,
+ 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
+ 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0xda,
+ 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x25, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b,
+ 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x3a, 0x05,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0b, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0xda, 0x41,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x10,
+ 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73,
+ 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70,
+ 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x35, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d,
+ 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70,
+ 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65,
+ 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56,
+ 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56,
+ 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_group_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_google_monitoring_v3_group_service_proto_goTypes = []any{
+ (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest
+ (*ListGroupsResponse)(nil), // 1: google.monitoring.v3.ListGroupsResponse
+ (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest
+ (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest
+ (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest
+ (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest
+ (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest
+ (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse
+ (*Group)(nil), // 8: google.monitoring.v3.Group
+ (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval
+ (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource
+ (*emptypb.Empty)(nil), // 11: google.protobuf.Empty
+}
+var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{
+ 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group
+ 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group
+ 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group
+ 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval
+ 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource
+ 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest
+ 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest
+ 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest
+ 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest
+ 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest
+ 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest
+ 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse
+ 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group
+ 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group
+ 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group
+ 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty
+ 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse
+ 11, // [11:17] is the sub-list for method output_type
+ 5, // [5:11] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_group_service_proto_init() }
+func file_google_monitoring_v3_group_service_proto_init() {
+ if File_google_monitoring_v3_group_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_group_proto_init()
+ file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{
+ (*ListGroupsRequest_ChildrenOfGroup)(nil),
+ (*ListGroupsRequest_AncestorsOfGroup)(nil),
+ (*ListGroupsRequest_DescendantsOfGroup)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_group_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_group_service_proto = out.File
+ file_google_monitoring_v3_group_service_proto_rawDesc = nil
+ file_google_monitoring_v3_group_service_proto_goTypes = nil
+ file_google_monitoring_v3_group_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// GroupServiceClient is the client API for GroupService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GroupServiceClient interface {
+ // Lists the existing groups.
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Creates a new group.
+ CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error)
+}
+
+type groupServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient {
+ return &groupServiceClient{cc}
+}
+
+func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) {
+ out := new(ListGroupsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) {
+ out := new(ListGroupMembersResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GroupServiceServer is the server API for GroupService service.
+type GroupServiceServer interface {
+ // Lists the existing groups.
+ ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(context.Context, *GetGroupRequest) (*Group, error)
+ // Creates a new group.
+ CreateGroup(context.Context, *CreateGroupRequest) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error)
+}
+
+// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedGroupServiceServer struct {
+}
+
+func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented")
+}
+func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented")
+}
+
+func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) {
+ s.RegisterService(&_GroupService_serviceDesc, srv)
+}
+
+func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroups(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroups",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).GetGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/GetGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).CreateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/CreateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupMembersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _GroupService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.GroupService",
+ HandlerType: (*GroupServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListGroups",
+ Handler: _GroupService_ListGroups_Handler,
+ },
+ {
+ MethodName: "GetGroup",
+ Handler: _GroupService_GetGroup_Handler,
+ },
+ {
+ MethodName: "CreateGroup",
+ Handler: _GroupService_CreateGroup_Handler,
+ },
+ {
+ MethodName: "UpdateGroup",
+ Handler: _GroupService_UpdateGroup_Handler,
+ },
+ {
+ MethodName: "DeleteGroup",
+ Handler: _GroupService_DeleteGroup_Handler,
+ },
+ {
+ MethodName: "ListGroupMembers",
+ Handler: _GroupService_ListGroupMembers_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/group_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
new file mode 100644
index 000000000..ae7eea5b6
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
@@ -0,0 +1,1067 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/metric.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ label "google.golang.org/genproto/googleapis/api/label"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A single data point in a time series.
+type Point struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time interval to which the data point applies. For `GAUGE` metrics,
+ // the start time is optional, but if it is supplied, it must equal the
+ // end time. For `DELTA` metrics, the start
+ // and end time should specify a non-zero interval, with subsequent points
+ // specifying contiguous and non-overlapping intervals. For `CUMULATIVE`
+ // metrics, the start and end time should specify a non-zero interval, with
+ // subsequent points specifying the same start time and increasing end times,
+ // until an event resets the cumulative value to zero and sets a new start
+ // time for the following points.
+ Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
+ // The value of the data point.
+ Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Point) Reset() {
+ *x = Point{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Point) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Point) ProtoMessage() {}
+
+func (x *Point) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Point.ProtoReflect.Descriptor instead.
+func (*Point) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Point) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *Point) GetValue() *TypedValue {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric. A time series is identified by a combination of a
+// fully-specified monitored resource and a fully-specified metric.
+// This type is used for both listing and creating time series.
+type TimeSeries struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The associated metric. A fully-specified metric used to identify the time
+ // series.
+ Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"`
+ // The associated monitored resource. Custom metrics can use only certain
+ // monitored resource types in their time series data. For more information,
+ // see [Monitored resources for custom
+ // metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources).
+ Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Output only. The associated monitored resource metadata. When reading a
+ // time series, this field will include metadata labels that are explicitly
+ // named in the reduction. When creating a time series, this field is ignored.
+ Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The metric kind of the time series. When listing time series, this metric
+ // kind might be different from the metric kind of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the metric kind of the associated metric. If the associated
+ // metric's descriptor must be auto-created, then this field specifies the
+ // metric kind of the new descriptor and must be either `GAUGE` (the default)
+ // or `CUMULATIVE`.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The value type of the time series. When listing time series, this value
+ // type might be different from the value type of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the type of the data in the `points` field.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The data points of this time series. When listing time series, points are
+ // returned in reverse time order.
+ //
+ // When creating a time series, this field must contain exactly one point and
+ // the point's type must be the same as the value type of the associated
+ // metric. If the associated metric's descriptor must be auto-created, then
+ // the value type of the descriptor is determined by the point's type, which
+ // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
+ Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"`
+ // The units in which the metric value is reported. It is only applicable
+ // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`
+ // defines the representation of the stored metric values. This field can only
+ // be changed through CreateTimeSeries when it is empty.
+ Unit string `protobuf:"bytes,8,opt,name=unit,proto3" json:"unit,omitempty"`
+ // Input only. A detailed description of the time series that will be
+ // associated with the
+ // [google.api.MetricDescriptor][google.api.MetricDescriptor] for the metric.
+ // Once set, this field cannot be changed through CreateTimeSeries.
+ Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *TimeSeries) Reset() {
+ *x = TimeSeries{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeries) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeries) ProtoMessage() {}
+
+func (x *TimeSeries) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead.
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TimeSeries) GetMetric() *metric.Metric {
+ if x != nil {
+ return x.Metric
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind {
+ if x != nil {
+ return x.MetricKind
+ }
+ return metric.MetricDescriptor_MetricKind(0)
+}
+
+func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType {
+ if x != nil {
+ return x.ValueType
+ }
+ return metric.MetricDescriptor_ValueType(0)
+}
+
+func (x *TimeSeries) GetPoints() []*Point {
+ if x != nil {
+ return x.Points
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+func (x *TimeSeries) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A descriptor for the labels and points in a time series.
+type TimeSeriesDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Descriptors for the labels.
+ LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"`
+ // Descriptors for the point data value columns.
+ PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"`
+}
+
+func (x *TimeSeriesDescriptor) Reset() {
+ *x = TimeSeriesDescriptor{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesDescriptor) ProtoMessage() {}
+
+func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead.
+func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor {
+ if x != nil {
+ return x.LabelDescriptors
+ }
+ return nil
+}
+
+func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor {
+ if x != nil {
+ return x.PointDescriptors
+ }
+ return nil
+}
+
+// Represents the values of a time series associated with a
+// TimeSeriesDescriptor.
+type TimeSeriesData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The values of the labels in the time series identifier, given in the same
+ // order as the `label_descriptors` field of the TimeSeriesDescriptor
+ // associated with this object. Each value must have a value of the type
+ // given in the corresponding entry of `label_descriptors`.
+ LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
+ // The points in the time series.
+ PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"`
+}
+
+func (x *TimeSeriesData) Reset() {
+ *x = TimeSeriesData{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesData) ProtoMessage() {}
+
+func (x *TimeSeriesData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead.
+func (*TimeSeriesData) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TimeSeriesData) GetLabelValues() []*LabelValue {
+ if x != nil {
+ return x.LabelValues
+ }
+ return nil
+}
+
+func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData {
+ if x != nil {
+ return x.PointData
+ }
+ return nil
+}
+
+// A label value.
+type LabelValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The label value can be a bool, int64, or string.
+ //
+ // Types that are assignable to Value:
+ //
+ // *LabelValue_BoolValue
+ // *LabelValue_Int64Value
+ // *LabelValue_StringValue
+ Value isLabelValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *LabelValue) Reset() {
+ *x = LabelValue{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *LabelValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelValue) ProtoMessage() {}
+
+func (x *LabelValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead.
+func (*LabelValue) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4}
+}
+
+func (m *LabelValue) GetValue() isLabelValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *LabelValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*LabelValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *LabelValue) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*LabelValue_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *LabelValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*LabelValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+type isLabelValue_Value interface {
+ isLabelValue_Value()
+}
+
+type LabelValue_BoolValue struct {
+ // A bool label value.
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type LabelValue_Int64Value struct {
+ // An int64 label value.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type LabelValue_StringValue struct {
+ // A string label value.
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+func (*LabelValue_BoolValue) isLabelValue_Value() {}
+
+func (*LabelValue_Int64Value) isLabelValue_Value() {}
+
+func (*LabelValue_StringValue) isLabelValue_Value() {}
+
+// An error associated with a query in the time series query language format.
+type QueryError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The location of the time series query language text that this error applies
+ // to.
+ Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"`
+ // The error message.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *QueryError) Reset() {
+ *x = QueryError{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryError) ProtoMessage() {}
+
+func (x *QueryError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryError.ProtoReflect.Descriptor instead.
+func (*QueryError) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *QueryError) GetLocator() *TextLocator {
+ if x != nil {
+ return x.Locator
+ }
+ return nil
+}
+
+func (x *QueryError) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+// A locator for text. Indicates a particular part of the text of a request or
+// of an object referenced in the request.
+//
+// For example, suppose the request field `text` contains:
+//
+// text: "The quick brown fox jumps over the lazy dog."
+//
+// Then the locator:
+//
+// source: "text"
+// start_position {
+// line: 1
+// column: 17
+// }
+// end_position {
+// line: 1
+// column: 19
+// }
+//
+// refers to the part of the text: "fox".
+type TextLocator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The source of the text. The source may be a field in the request, in which
+ // case its format is the format of the
+ // google.rpc.BadRequest.FieldViolation.field field in
+ // https://cloud.google.com/apis/design/errors#error_details. It may also be
+ // be a source other than the request field (e.g. a macro definition
+ // referenced in the text of the query), in which case this is the name of
+ // the source (e.g. the macro name).
+ Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
+ // The position of the first byte within the text.
+ StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
+ // The position of the last byte within the text.
+ EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"`
+ // If `source`, `start_position`, and `end_position` describe a call on
+ // some object (e.g. a macro in the time series query language text) and a
+ // location is to be designated in that object's text, `nested_locator`
+ // identifies the location within that object.
+ NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"`
+ // When `nested_locator` is set, this field gives the reason for the nesting.
+ // Usually, the reason is a macro invocation. In that case, the macro name
+ // (including the leading '@') signals the location of the macro call
+ // in the text and a macro argument name (including the leading '$') signals
+ // the location of the macro argument inside the macro body that got
+ // substituted away.
+ NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"`
+}
+
+func (x *TextLocator) Reset() {
+ *x = TextLocator{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TextLocator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TextLocator) ProtoMessage() {}
+
+func (x *TextLocator) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead.
+func (*TextLocator) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *TextLocator) GetSource() string {
+ if x != nil {
+ return x.Source
+ }
+ return ""
+}
+
+func (x *TextLocator) GetStartPosition() *TextLocator_Position {
+ if x != nil {
+ return x.StartPosition
+ }
+ return nil
+}
+
+func (x *TextLocator) GetEndPosition() *TextLocator_Position {
+ if x != nil {
+ return x.EndPosition
+ }
+ return nil
+}
+
+func (x *TextLocator) GetNestedLocator() *TextLocator {
+ if x != nil {
+ return x.NestedLocator
+ }
+ return nil
+}
+
+func (x *TextLocator) GetNestingReason() string {
+ if x != nil {
+ return x.NestingReason
+ }
+ return ""
+}
+
+// A descriptor for the value columns in a data point.
+type TimeSeriesDescriptor_ValueDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value key.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value type.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The value stream kind.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The unit in which `time_series` point values are reported. `unit`
+ // follows the UCUM format for units as seen in
+ // https://unitsofmeasure.org/ucum.html.
+ // `unit` is only valid if `value_type` is INTEGER, DOUBLE, DISTRIBUTION.
+ Unit string `protobuf:"bytes,4,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() {
+ *x = TimeSeriesDescriptor_ValueDescriptor{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead.
+func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType {
+ if x != nil {
+ return x.ValueType
+ }
+ return metric.MetricDescriptor_ValueType(0)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind {
+ if x != nil {
+ return x.MetricKind
+ }
+ return metric.MetricDescriptor_MetricKind(0)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+// A point's value columns and time interval. Each point has one or more
+// point values corresponding to the entries in `point_descriptors` field in
+// the TimeSeriesDescriptor associated with this object.
+type TimeSeriesData_PointData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The values that make up the point.
+ Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // The time interval associated with the point.
+ TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"`
+}
+
+func (x *TimeSeriesData_PointData) Reset() {
+ *x = TimeSeriesData_PointData{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesData_PointData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesData_PointData) ProtoMessage() {}
+
+func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead.
+func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *TimeSeriesData_PointData) GetValues() []*TypedValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval {
+ if x != nil {
+ return x.TimeInterval
+ }
+ return nil
+}
+
+// The position of a byte within the text.
+type TextLocator_Position struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The line, starting with 1, where the byte is positioned.
+ Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"`
+ // The column within the line, starting with 1, where the byte is
+ // positioned. This is a byte index even though the text is UTF-8.
+ Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *TextLocator_Position) Reset() {
+ *x = TextLocator_Position{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TextLocator_Position) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TextLocator_Position) ProtoMessage() {}
+
+func (x *TextLocator_Position) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead.
+func (*TextLocator_Position) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *TextLocator_Position) GetLine() int32 {
+ if x != nil {
+ return x.Line
+ }
+ return 0
+}
+
+func (x *TextLocator_Position) GetColumn() int32 {
+ if x != nil {
+ return x.Column
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_metric_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x03, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a,
+ 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x12, 0x48, 0x0a, 0x11, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x52, 0x10, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52,
+ 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75,
+ 0x6e, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22,
+ 0xb5, 0x02, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74,
+ 0x61, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x47,
+ 0x0a, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65,
+ 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf0, 0x02, 0x0a,
+ 0x0b, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x50, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64,
+ 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x67, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42,
+ 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76,
+ 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70,
+ 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02,
+ 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc
+)
+
+func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_metric_proto_rawDescData
+}
+
+var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_google_monitoring_v3_metric_proto_goTypes = []any{
+ (*Point)(nil), // 0: google.monitoring.v3.Point
+ (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries
+ (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor
+ (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData
+ (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue
+ (*QueryError)(nil), // 5: google.monitoring.v3.QueryError
+ (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator
+ (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor
+ (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData
+ (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position
+ (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval
+ (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue
+ (*metric.Metric)(nil), // 12: google.api.Metric
+ (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource
+ (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata
+ (metric.MetricDescriptor_MetricKind)(0), // 15: google.api.MetricDescriptor.MetricKind
+ (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType
+ (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor
+}
+var file_google_monitoring_v3_metric_proto_depIdxs = []int32{
+ 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval
+ 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue
+ 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric
+ 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource
+ 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata
+ 15, // 5: google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
+ 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType
+ 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point
+ 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor
+ 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor
+ 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue
+ 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData
+ 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator
+ 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position
+ 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position
+ 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator
+ 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
+ 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
+ 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue
+ 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_metric_proto_init() }
+func file_google_monitoring_v3_metric_proto_init() {
+ if File_google_monitoring_v3_metric_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{
+ (*LabelValue_BoolValue)(nil),
+ (*LabelValue_Int64Value)(nil),
+ (*LabelValue_StringValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 10,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_metric_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_metric_proto = out.File
+ file_google_monitoring_v3_metric_proto_rawDesc = nil
+ file_google_monitoring_v3_metric_proto_goTypes = nil
+ file_google_monitoring_v3_metric_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
new file mode 100644
index 000000000..39b959524
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
@@ -0,0 +1,2293 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/metric_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status1 "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Controls which fields are returned by `ListTimeSeries*`.
+type ListTimeSeriesRequest_TimeSeriesView int32
+
+const (
+ // Returns the identity of the metric(s), the time series,
+ // and the time series data.
+ ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0
+ // Returns the identity of the metric and the time series resource,
+ // but not the time series data.
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1
+)
+
+// Enum value maps for ListTimeSeriesRequest_TimeSeriesView.
+var (
+ ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{
+ 0: "FULL",
+ 1: "HEADERS",
+ }
+ ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{
+ "FULL": 0,
+ "HEADERS": 1,
+ }
+)
+
+func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView {
+ p := new(ListTimeSeriesRequest_TimeSeriesView)
+ *p = x
+ return p
+}
+
+func (x ListTimeSeriesRequest_TimeSeriesView) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor()
+}
+
+func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_metric_service_proto_enumTypes[0]
+}
+
+func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead.
+func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+// The `ListMonitoredResourceDescriptors` request.
+type ListMonitoredResourceDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // describing the descriptors to be returned. The filter can reference the
+ // descriptor's type and labels. For example, the following filter returns
+ // only Google Compute Engine descriptors that have an `id` label:
+ //
+ // resource.type = starts_with("gce_") AND resource.label:id
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) Reset() {
+ *x = ListMonitoredResourceDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListMonitoredResourceDescriptors` response.
+type ListMonitoredResourceDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The monitored resource descriptors that are available to this project
+ // and that match `filter`, if present.
+ ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) Reset() {
+ *x = ListMonitoredResourceDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor {
+ if x != nil {
+ return x.ResourceDescriptors
+ }
+ return nil
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetMonitoredResourceDescriptor` request.
+type GetMonitoredResourceDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The monitored resource descriptor to get. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]
+ //
+ // The `[RESOURCE_TYPE]` is a predefined type, such as
+ // `cloudsql_database`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) Reset() {
+ *x = GetMonitoredResourceDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {}
+
+func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListMetricDescriptors` request.
+type ListMetricDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If this field is empty, all custom and
+ // system-defined metric descriptors are returned.
+ // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifies which metric descriptors are to be
+ // returned. For example, the following filter matches all
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics):
+ //
+ // metric.type = starts_with("custom.googleapis.com/")
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A positive number that is the maximum number of results to
+ // return. The default and maximum value is 10,000. If a page_size <= 0 or >
+ // 10,000 is submitted, will instead return a maximum of 10,000 results.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If this field is not empty then it must contain the
+ // `nextPageToken` value returned by a previous call to this method. Using
+ // this field causes the method to return additional results from the previous
+ // method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Optional. If true, only metrics and monitored resource types that have
+ // recent data (within roughly 25 hours) will be included in the response.
+ // - If a metric descriptor enumerates monitored resource types, only the
+ // monitored resource types for which the metric type has recent data will
+ // be included in the returned metric descriptor, and if none of them have
+ // recent data, the metric descriptor will not be returned.
+ // - If a metric descriptor does not enumerate the compatible monitored
+ // resource types, it will be returned only if the metric type has recent
+ // data for some monitored resource type. The returned descriptor will not
+ // enumerate any monitored resource types.
+ ActiveOnly bool `protobuf:"varint,6,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"`
+}
+
+func (x *ListMetricDescriptorsRequest) Reset() {
+ *x = ListMetricDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMetricDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMetricDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListMetricDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetActiveOnly() bool {
+ if x != nil {
+ return x.ActiveOnly
+ }
+ return false
+}
+
+// The `ListMetricDescriptors` response.
+type ListMetricDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The metric descriptors that are available to the project
+ // and that match the value of `filter`, if present.
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListMetricDescriptorsResponse) Reset() {
+ *x = ListMetricDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMetricDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMetricDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptors
+ }
+ return nil
+}
+
+func (x *ListMetricDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetMetricDescriptor` request.
+type GetMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The metric descriptor on which to execute the request. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+ //
+ // An example value of `[METRIC_ID]` is
+ // `"compute.googleapis.com/instance/disk/read_bytes_count"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetMetricDescriptorRequest) Reset() {
+ *x = GetMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *GetMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateMetricDescriptor` request.
+type CreateMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ // 4
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The new [custom
+ // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor.
+ MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+}
+
+func (x *CreateMetricDescriptorRequest) Reset() {
+ *x = CreateMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CreateMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptor
+ }
+ return nil
+}
+
+// The `DeleteMetricDescriptor` request.
+type DeleteMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The metric descriptor on which to execute the request. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+ //
+ // An example of `[METRIC_ID]` is:
+ // `"custom.googleapis.com/my_test_metric"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteMetricDescriptorRequest) Reset() {
+ *x = DeleteMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DeleteMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListTimeSeries` request.
+type ListTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name),
+ // organization or folder on which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ // organizations/[ORGANIZATION_ID]
+ // folders/[FOLDER_ID]
+ Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. A [monitoring
+ // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies
+ // which time series should be returned. The filter must specify a single
+ // metric type, and can additionally specify metric labels and other
+ // information. For example:
+ //
+ // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+ // metric.labels.instance_name = "my-instance-name"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Required. The time interval for which results should be returned. Only time
+ // series that contain data points in the specified interval are included in
+ // the response.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series across specified labels.
+ //
+ // By default (if no `aggregation` is explicitly specified), the raw time
+ // series data is returned.
+ Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"`
+ // Apply a second aggregation after `aggregation` is applied. May only be
+ // specified if `aggregation` is specified.
+ SecondaryAggregation *Aggregation `protobuf:"bytes,11,opt,name=secondary_aggregation,json=secondaryAggregation,proto3" json:"secondary_aggregation,omitempty"`
+ // Unsupported: must be left blank. The points in each time series are
+ // currently returned in reverse time order (most recent to oldest).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Required. Specifies which information is returned about the time series.
+ View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"`
+ // A positive number that is the maximum number of results to return. If
+ // `page_size` is empty or more than 100,000 results, the effective
+ // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the
+ // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is
+ // the maximum number of `TimeSeries` returned.
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListTimeSeriesRequest) Reset() {
+ *x = ListTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListTimeSeriesRequest) ProtoMessage() {}
+
+func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ListTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation {
+ if x != nil {
+ return x.Aggregation
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetSecondaryAggregation() *Aggregation {
+ if x != nil {
+ return x.SecondaryAggregation
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView {
+ if x != nil {
+ return x.View
+ }
+ return ListTimeSeriesRequest_FULL
+}
+
+func (x *ListTimeSeriesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListTimeSeriesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListTimeSeries` response.
+type ListTimeSeriesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // One or more time series that match the filter included in the request.
+ TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete.
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"`
+ // The unit in which all `time_series` point values are reported. `unit`
+ // follows the UCUM format for units as seen in
+ // https://unitsofmeasure.org/ucum.html.
+ // If different `time_series` have different units (for example, because they
+ // come from different metric types, or a unit is absent), then `unit` will be
+ // "{not_a_unit}".
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (x *ListTimeSeriesResponse) Reset() {
+ *x = ListTimeSeriesResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListTimeSeriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListTimeSeriesResponse) ProtoMessage() {}
+
+func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead.
+func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status {
+ if x != nil {
+ return x.ExecutionErrors
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesResponse) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+// The `CreateTimeSeries` request.
+type CreateTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The new data to be added to a list of time series.
+ // Adds at most one data point to each of several time series. The new data
+ // point must be more recent than any other point in its time series. Each
+ // `TimeSeries` value must fully specify a unique time series by supplying
+ // all label values for the metric and the monitored resource.
+ //
+ // The maximum number of `TimeSeries` objects per `Create` request is 200.
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+}
+
+func (x *CreateTimeSeriesRequest) Reset() {
+ *x = CreateTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesRequest) ProtoMessage() {}
+
+func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *CreateTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+// DEPRECATED. Used to hold per-time-series error status.
+type CreateTimeSeriesError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // DEPRECATED. Time series ID that resulted in the `status` error.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+ TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // DEPRECATED. The status of the requested write operation for `time_series`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *CreateTimeSeriesError) Reset() {
+ *x = CreateTimeSeriesError{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesError) ProtoMessage() {}
+
+func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11}
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+func (x *CreateTimeSeriesError) GetStatus() *status.Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+// Summary of the result of a failed request to write data to a time series.
+type CreateTimeSeriesSummary struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of points in the request.
+ TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"`
+ // The number of points that were successfully written.
+ SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"`
+ // The number of points that failed to be written. Order is not guaranteed.
+ Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *CreateTimeSeriesSummary) Reset() {
+ *x = CreateTimeSeriesSummary{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesSummary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesSummary) ProtoMessage() {}
+
+func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 {
+ if x != nil {
+ return x.TotalPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 {
+ if x != nil {
+ return x.SuccessPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+// The `QueryTimeSeries` request. For information about the status of
+// Monitoring Query Language (MQL), see the [MQL deprecation
+// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+type QueryTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The query in the [Monitoring Query
+ // Language](https://cloud.google.com/monitoring/mql/reference) format.
+ // The default time zone is in UTC.
+ Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
+ // A positive number that is the maximum number of time_series_data to return.
+ PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *QueryTimeSeriesRequest) Reset() {
+ *x = QueryTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryTimeSeriesRequest) ProtoMessage() {}
+
+func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *QueryTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *QueryTimeSeriesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `QueryTimeSeries` response. For information about the status of
+// Monitoring Query Language (MQL), see the [MQL deprecation
+// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+type QueryTimeSeriesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The descriptor for the time series data.
+ TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"`
+ // The time series data.
+ TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results, use that value as
+ // `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete. The available data will be available in the
+ // response.
+ PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"`
+}
+
+func (x *QueryTimeSeriesResponse) Reset() {
+ *x = QueryTimeSeriesResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryTimeSeriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryTimeSeriesResponse) ProtoMessage() {}
+
+func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead.
+func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor {
+ if x != nil {
+ return x.TimeSeriesDescriptor
+ }
+ return nil
+}
+
+func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData {
+ if x != nil {
+ return x.TimeSeriesData
+ }
+ return nil
+}
+
+func (x *QueryTimeSeriesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status {
+ if x != nil {
+ return x.PartialErrors
+ }
+ return nil
+}
+
+// This is an error detail intended to be used with INVALID_ARGUMENT errors.
+type QueryErrorList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Errors in parsing the time series query language text. The number of errors
+ // in the response may be limited.
+ Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ // A summary of all the errors.
+ ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"`
+}
+
+func (x *QueryErrorList) Reset() {
+ *x = QueryErrorList{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryErrorList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryErrorList) ProtoMessage() {}
+
+func (x *QueryErrorList) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead.
+func (*QueryErrorList) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *QueryErrorList) GetErrors() []*QueryError {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+func (x *QueryErrorList) GetErrorSummary() string {
+ if x != nil {
+ return x.ErrorSummary
+ }
+ return ""
+}
+
+// Detailed information about an error category.
+type CreateTimeSeriesSummary_Error struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The status of the requested write operation.
+ Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+ // The number of points that couldn't be written because of `status`.
+ PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"`
+}
+
+func (x *CreateTimeSeriesSummary_Error) Reset() {
+ *x = CreateTimeSeriesSummary_Error{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesSummary_Error) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesSummary_Error) ProtoMessage() {}
+
+func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 {
+ if x != nil {
+ return x.PointCount
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0,
+ 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x37,
+ 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a,
+ 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
+ 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef,
+ 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x24, 0x0a, 0x0b, 0x61, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79,
+ 0x22, 0x94, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12,
+ 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01,
+ 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0xad, 0x04, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26,
+ 0x12, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43,
+ 0x0a, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79,
+ 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79,
+ 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f,
+ 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f,
+ 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
+ 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c,
+ 0x4c, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01,
+ 0x22, 0xd6, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26,
+ 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46,
+ 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x12, 0x45, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x2e, 0x0a, 0x13, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x4b, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0x8c, 0x01, 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x02, 0x18,
+ 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a,
+ 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12,
+ 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52,
+ 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12,
+ 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x6f, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbc, 0x0f, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, 0x01, 0x0a, 0x20, 0x4c, 0x69,
+ 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda,
+ 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12,
+ 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x36, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x29, 0x12, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x13, 0x47,
+ 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xc8, 0x01,
+ 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x5b, 0xda, 0x41, 0x16,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x11, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22,
+ 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a,
+ 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xfe, 0x01, 0x0a, 0x0e,
+ 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x01, 0xda, 0x41, 0x19, 0x6e,
+ 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x2c, 0x76, 0x69, 0x65, 0x77, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x6e, 0x5a, 0x27,
+ 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61,
+ 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x99, 0x01, 0x0a,
+ 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0xda, 0x41, 0x10, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0xae, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, 0xda, 0x41, 0x10,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x01, 0x2a, 0x22, 0x2e, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xda, 0x01, 0xca, 0x41, 0x19, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xba, 0x01, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73,
+ 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x89, 0x08, 0xea, 0x41, 0xf0, 0x01, 0x0a, 0x2a, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f,
+ 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x39, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0xb7,
+ 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0x51, 0x0a, 0x23, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x12, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x2f, 0x7b, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0xea, 0x41, 0xb5, 0x01,
+ 0x0a, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x7d, 0x12, 0x35, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x29, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_metric_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_metric_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
+var file_google_monitoring_v3_metric_service_proto_goTypes = []any{
+ (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView
+ (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest
+ (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse
+ (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest
+ (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest
+ (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse
+ (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest
+ (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest
+ (*DeleteMetricDescriptorRequest)(nil), // 8: google.monitoring.v3.DeleteMetricDescriptorRequest
+ (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest
+ (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse
+ (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest
+ (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError
+ (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary
+ (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest
+ (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse
+ (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList
+ (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error
+ (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor
+ (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor
+ (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval
+ (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation
+ (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries
+ (*status.Status)(nil), // 23: google.rpc.Status
+ (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor
+ (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData
+ (*QueryError)(nil), // 26: google.monitoring.v3.QueryError
+ (*emptypb.Empty)(nil), // 27: google.protobuf.Empty
+}
+var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{
+ 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor
+ 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor
+ 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor
+ 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval
+ 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation
+ 21, // 5: google.monitoring.v3.ListTimeSeriesRequest.secondary_aggregation:type_name -> google.monitoring.v3.Aggregation
+ 0, // 6: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView
+ 22, // 7: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 23, // 8: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status
+ 22, // 9: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 22, // 10: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 23, // 11: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status
+ 17, // 12: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error
+ 24, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor
+ 25, // 14: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData
+ 23, // 15: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status
+ 26, // 16: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError
+ 23, // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status
+ 1, // 18: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest
+ 3, // 19: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest
+ 4, // 20: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest
+ 6, // 21: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest
+ 7, // 22: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest
+ 8, // 23: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest
+ 9, // 24: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest
+ 11, // 25: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest
+ 11, // 26: google.monitoring.v3.MetricService.CreateServiceTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest
+ 2, // 27: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse
+ 18, // 28: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor
+ 5, // 29: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse
+ 19, // 30: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor
+ 19, // 31: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor
+ 27, // 32: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty
+ 10, // 33: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> google.monitoring.v3.ListTimeSeriesResponse
+ 27, // 34: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty
+ 27, // 35: google.monitoring.v3.MetricService.CreateServiceTimeSeries:output_type -> google.protobuf.Empty
+ 27, // [27:36] is the sub-list for method output_type
+ 18, // [18:27] is the sub-list for method input_type
+ 18, // [18:18] is the sub-list for extension type_name
+ 18, // [18:18] is the sub-list for extension extendee
+ 0, // [0:18] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_metric_service_proto_init() }
+func file_google_monitoring_v3_metric_service_proto_init() {
+ if File_google_monitoring_v3_metric_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_metric_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 17,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_metric_service_proto = out.File
+ file_google_monitoring_v3_metric_service_proto_rawDesc = nil
+ file_google_monitoring_v3_metric_service_proto_goTypes = nil
+ file_google_monitoring_v3_metric_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// MetricServiceClient is the client API for MetricService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricServiceClient interface {
+ // Lists monitored resource descriptors that match a filter.
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor.
+ GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter.
+ ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor.
+ GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // The creation is executed asynchronously.
+ // User-created metric descriptors define
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics).
+ // The metric descriptor is updated if it already exists,
+ // except that metric labels are never removed.
+ CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be
+ // deleted.
+ DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists time series that match a filter.
+ ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ // This method does not support
+ // [resource locations constraint of an organization
+ // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+ CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Creates or adds data to one or more service time series. A service time
+ // series is a time series for a metric from a Google Cloud service. The
+ // response is empty if all time series in the request were written. If any
+ // time series could not be written, a corresponding failure message is
+ // included in the error response. This endpoint rejects writes to
+ // user-defined metrics.
+ // This method is only for use by Google Cloud services. Use
+ // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries]
+ // instead.
+ CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type metricServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient {
+ return &metricServiceClient{cc}
+}
+
+func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
+ out := new(ListMonitoredResourceDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) {
+ out := new(monitoredres.MonitoredResourceDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) {
+ out := new(ListMetricDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) {
+ out := new(ListTimeSeriesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MetricServiceServer is the server API for MetricService service.
+type MetricServiceServer interface {
+ // Lists monitored resource descriptors that match a filter.
+ ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor.
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter.
+ ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor.
+ GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // The creation is executed asynchronously.
+ // User-created metric descriptors define
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics).
+ // The metric descriptor is updated if it already exists,
+ // except that metric labels are never removed.
+ CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be
+ // deleted.
+ DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error)
+ // Lists time series that match a filter.
+ ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ // This method does not support
+ // [resource locations constraint of an organization
+ // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error)
+ // Creates or adds data to one or more service time series. A service time
+ // series is a time series for a metric from a Google Cloud service. The
+ // response is empty if all time series in the request were written. If any
+ // time series could not be written, a corresponding failure message is
+ // included in the error response. This endpoint rejects writes to
+ // user-defined metrics.
+ // This method is only for use by Google Cloud services. Use
+ // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries]
+ // instead.
+ CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error)
+}
+
+// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricServiceServer struct {
+}
+
+func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented")
+}
+func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented")
+}
+func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateServiceTimeSeries not implemented")
+}
+
+func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) {
+ s.RegisterService(&_MetricService_serviceDesc, srv)
+}
+
+func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMonitoredResourceDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMonitoredResourceDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMetricDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateServiceTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateServiceTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, req.(*CreateTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _MetricService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.MetricService",
+ HandlerType: (*MetricServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListMonitoredResourceDescriptors",
+ Handler: _MetricService_ListMonitoredResourceDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMonitoredResourceDescriptor",
+ Handler: _MetricService_GetMonitoredResourceDescriptor_Handler,
+ },
+ {
+ MethodName: "ListMetricDescriptors",
+ Handler: _MetricService_ListMetricDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMetricDescriptor",
+ Handler: _MetricService_GetMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "CreateMetricDescriptor",
+ Handler: _MetricService_CreateMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "DeleteMetricDescriptor",
+ Handler: _MetricService_DeleteMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "ListTimeSeries",
+ Handler: _MetricService_ListTimeSeries_Handler,
+ },
+ {
+ MethodName: "CreateTimeSeries",
+ Handler: _MetricService_CreateTimeSeries_Handler,
+ },
+ {
+ MethodName: "CreateServiceTimeSeries",
+ Handler: _MetricService_CreateServiceTimeSeries_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/metric_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
new file mode 100644
index 000000000..e03d89efe
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
@@ -0,0 +1,176 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/mutation_record.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes a change made to a configuration.
+type MutationRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // When the change occurred.
+ MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"`
+ // The email address of the user making the change.
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"`
+}
+
+func (x *MutationRecord) Reset() {
+ *x = MutationRecord{}
+ mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MutationRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MutationRecord) ProtoMessage() {}
+
+func (x *MutationRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead.
+func (*MutationRecord) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.MutateTime
+ }
+ return nil
+}
+
+func (x *MutationRecord) GetMutatedBy() string {
+ if x != nil {
+ return x.MutatedBy
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42,
+ 0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13,
+ 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc
+)
+
+func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_mutation_record_proto_rawDescData
+}
+
+var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{
+ (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord
+ (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
+}
+var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_mutation_record_proto_init() }
+func file_google_monitoring_v3_mutation_record_proto_init() {
+ if File_google_monitoring_v3_mutation_record_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_mutation_record_proto = out.File
+ file_google_monitoring_v3_mutation_record_proto_rawDesc = nil
+ file_google_monitoring_v3_mutation_record_proto_goTypes = nil
+ file_google_monitoring_v3_mutation_record_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
new file mode 100644
index 000000000..0d5cacbec
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
@@ -0,0 +1,619 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/notification.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ api "google.golang.org/genproto/googleapis/api"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ label "google.golang.org/genproto/googleapis/api/label"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates whether the channel has been verified or not. It is illegal
+// to specify this field in a
+// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
+// or an
+// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+// operation.
+type NotificationChannel_VerificationStatus int32
+
+const (
+ // Sentinel value used to indicate that the state is unknown, omitted, or
+ // is not applicable (as in the case of channels that neither support
+ // nor require verification in order to function).
+ NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
+ // The channel has yet to be verified and requires verification to function.
+ // Note that this state also applies to the case where the verification
+ // process has been initiated by sending a verification code but where
+ // the verification code has not been submitted to complete the process.
+ NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
+ // It has been proven that notifications can be received on this
+ // notification channel and that someone on the project has access
+ // to messages that are delivered to that channel.
+ NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
+)
+
+// Enum value maps for NotificationChannel_VerificationStatus.
+var (
+ NotificationChannel_VerificationStatus_name = map[int32]string{
+ 0: "VERIFICATION_STATUS_UNSPECIFIED",
+ 1: "UNVERIFIED",
+ 2: "VERIFIED",
+ }
+ NotificationChannel_VerificationStatus_value = map[string]int32{
+ "VERIFICATION_STATUS_UNSPECIFIED": 0,
+ "UNVERIFIED": 1,
+ "VERIFIED": 2,
+ }
+)
+
+func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus {
+ p := new(NotificationChannel_VerificationStatus)
+ *p = x
+ return p
+}
+
+func (x NotificationChannel_VerificationStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor()
+}
+
+func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_notification_proto_enumTypes[0]
+}
+
+func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead.
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// A description of a notification channel. The descriptor includes
+// the properties of the channel and the set of labels or fields that
+// must be specified to configure channels of a given type.
+type NotificationChannelDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full REST resource name for this descriptor. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE]
+ //
+ // In the above, `[TYPE]` is the value of the `type` field.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // The type of notification channel, such as "email" and "sms". To view the
+ // full list of channels, see
+ // [Channel
+ // descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd).
+ // Notification channel types are globally unique.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // A human-readable name for the notification channel type. This
+ // form of the name is suitable for a user interface.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // A human-readable description of the notification channel
+ // type. The description may include a description of the properties
+ // of the channel and pointers to external documentation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // The set of labels that must be defined to identify a particular
+ // channel of the corresponding type. Each label includes a
+ // description for how that field should be populated.
+ Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ // The tiers that support this notification channel; the project service tier
+ // must be one of the supported_tiers.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
+ SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"`
+ // The product launch stage for channels of this type.
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"`
+}
+
+func (x *NotificationChannelDescriptor) Reset() {
+ *x = NotificationChannelDescriptor{}
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NotificationChannelDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotificationChannelDescriptor) ProtoMessage() {}
+
+func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead.
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NotificationChannelDescriptor) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
+func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
+ if x != nil {
+ return x.SupportedTiers
+ }
+ return nil
+}
+
+func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage {
+ if x != nil {
+ return x.LaunchStage
+ }
+ return api.LaunchStage(0)
+}
+
+// A `NotificationChannel` is a medium through which an alert is
+// delivered when a policy violation is detected. Examples of channels
+// include email, SMS, and third-party messaging applications. Fields
+// containing sensitive information like authentication tokens or
+// contact info are only partially populated on retrieval.
+type NotificationChannel struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of the notification channel. This field matches the
+ // value of the
+ // [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type]
+ // field.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Identifier. The full REST resource name for this channel. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ //
+ // The `[CHANNEL_ID]` is automatically assigned by the server on creation.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional human-readable name for this notification channel. It is
+ // recommended that you specify a non-empty and unique name in order to
+ // make it easier to identify the channels in your project, though this is
+ // not enforced. The display name is limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // An optional human-readable description of this notification channel. This
+ // description may provide additional details, beyond the display
+ // name, for the channel. This may not exceed 1024 Unicode characters.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ // Configuration fields that define the channel and its behavior. The
+ // permissible and required labels are specified in the
+ // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels]
+ // of the `NotificationChannelDescriptor` corresponding to the `type` field.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // User-supplied key/value data that does not need to conform to
+ // the corresponding `NotificationChannelDescriptor`'s schema, unlike
+ // the `labels` field. This field is intended to be used for organizing
+ // and identifying the `NotificationChannel` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Indicates whether this channel has been verified or not. On a
+ // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // or
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation, this field is expected to be populated.
+ //
+ // If the value is `UNVERIFIED`, then it indicates that the channel is
+ // non-functioning (it both requires verification and lacks verification);
+ // otherwise, it is assumed that the channel works.
+ //
+ // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
+ // the channel is of a type that does not require verification or that
+ // this specific channel has been exempted from verification because it was
+ // created prior to verification being required for channels of this type.
+ //
+ // This field cannot be modified using a standard
+ // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+ // operation. To change the value of this field, you must call
+ // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
+ VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
+ // Whether notifications are forwarded to the described channel. This makes
+ // it possible to disable delivery of notifications to a particular channel
+ // without removing the channel from all alerting policies that reference
+ // the channel. This is a more convenient approach when the change is
+ // temporary and you want to receive notifications from the same set
+ // of alerting policies on the channel at some point in the future.
+ Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Record of the creation of this channel.
+ CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
+ // Records of the modification of this channel.
+ MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"`
+}
+
+func (x *NotificationChannel) Reset() {
+ *x = NotificationChannel{}
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NotificationChannel) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotificationChannel) ProtoMessage() {}
+
+func (x *NotificationChannel) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead.
+func (*NotificationChannel) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *NotificationChannel) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
+ if x != nil {
+ return x.VerificationStatus
+ }
+ return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
+}
+
+func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Enabled
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetCreationRecord() *MutationRecord {
+ if x != nil {
+ return x.CreationRecord
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetMutationRecords() []*MutationRecord {
+ if x != nil {
+ return x.MutationRecords
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_notification_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64,
+ 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20,
+ 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61,
+ 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
+ 0x65, 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65,
+ 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+ 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+ 0x65, 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x7d, 0x12, 0x01, 0x2a, 0x22, 0xcb, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73,
+ 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a,
+ 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73,
+ 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d,
+ 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63,
+ 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a,
+ 0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39,
+ 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23,
+ 0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53,
+ 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73,
+ 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12,
+ 0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56,
+ 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56,
+ 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc
+)
+
+func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_notification_proto_rawDescData
+}
+
+var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_monitoring_v3_notification_proto_goTypes = []any{
+ (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus
+ (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor
+ (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel
+ nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry
+ nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry
+ (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor
+ (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier
+ (api.LaunchStage)(0), // 7: google.api.LaunchStage
+ (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue
+ (*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord
+}
+var file_google_monitoring_v3_notification_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor
+ 6, // 1: google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier
+ 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage
+ 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry
+ 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry
+ 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus
+ 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue
+ 9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord
+ 9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_notification_proto_init() }
+func file_google_monitoring_v3_notification_proto_init() {
+ if File_google_monitoring_v3_notification_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_mutation_record_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_notification_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_notification_proto = out.File
+ file_google_monitoring_v3_notification_proto_rawDesc = nil
+ file_google_monitoring_v3_notification_proto_goTypes = nil
+ file_google_monitoring_v3_notification_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
new file mode 100644
index 000000000..fd0230036
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
@@ -0,0 +1,1819 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/notification_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `ListNotificationChannelDescriptors` request.
+type ListNotificationChannelDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The REST resource name of the parent from which to retrieve
+ // the notification channel descriptors. The expected syntax is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this
+ // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent
+ // container in which to look for the descriptors; to retrieve a single
+ // descriptor by name, use the
+ // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) Reset() {
+ *x = ListNotificationChannelDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannelDescriptors` response.
+type ListNotificationChannelDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The monitored resource descriptors supported for the specified
+ // project, optionally filtered.
+ ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) Reset() {
+ *x = ListNotificationChannelDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
+ if x != nil {
+ return x.ChannelDescriptors
+ }
+ return nil
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannelDescriptor` response.
+type GetNotificationChannelDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel type for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetNotificationChannelDescriptorRequest) Reset() {
+ *x = GetNotificationChannelDescriptorRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetNotificationChannelDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateNotificationChannel` request.
+type CreateNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // This names the container into which the channel will be
+ // written, this does not name the newly created channel. The resulting
+ // channel's name will have a normalized version of this field as a prefix,
+ // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The definition of the `NotificationChannel` to create.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+}
+
+func (x *CreateNotificationChannelRequest) Reset() {
+ *x = CreateNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateNotificationChannelRequest) ProtoMessage() {}
+
+func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if x != nil {
+ return x.NotificationChannel
+ }
+ return nil
+}
+
+// The `ListNotificationChannels` request.
+type ListNotificationChannelsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // This names the container
+ // in which to look for the notification channels; it does not name a
+ // specific channel. To query a specific channel by REST resource name, use
+ // the
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If provided, this field specifies the criteria that must be met
+ // by notification channels to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A comma-separated list of fields by which to sort the result.
+ // Supports the same set of fields as in `filter`. Entries can be prefixed
+ // with a minus sign to sort in descending rather than ascending order.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Optional. The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelsRequest) Reset() {
+ *x = ListNotificationChannelsRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelsRequest) ProtoMessage() {}
+
+func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListNotificationChannelsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListNotificationChannelsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannels` response.
+type ListNotificationChannelsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The notification channels defined for the specified project.
+ NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of notification channels in all pages. This number is only
+ // an estimate, and may change in subsequent pages. https://aip.dev/158
+ TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListNotificationChannelsResponse) Reset() {
+ *x = ListNotificationChannelsResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelsResponse) ProtoMessage() {}
+
+func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel {
+ if x != nil {
+ return x.NotificationChannels
+ }
+ return nil
+}
+
+func (x *ListNotificationChannelsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The `GetNotificationChannel` request.
+type GetNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetNotificationChannelRequest) Reset() {
+ *x = GetNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *GetNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `UpdateNotificationChannel` request.
+type UpdateNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. The fields to update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. A description of the changes to be applied to the specified
+ // notification channel. The description must provide a definition for
+ // fields to be updated; the names of these fields should also be
+ // included in the `update_mask`.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+}
+
+func (x *UpdateNotificationChannelRequest) Reset() {
+ *x = UpdateNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateNotificationChannelRequest) ProtoMessage() {}
+
+func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if x != nil {
+ return x.NotificationChannel
+ }
+ return nil
+}
+
+// The `DeleteNotificationChannel` request.
+type DeleteNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // If true, the notification channel will be deleted regardless of its
+ // use in alert policies (the policies will be updated to remove the
+ // channel). If false, this operation will fail if the notification channel
+ // is referenced by existing alerting policies.
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"`
+}
+
+func (x *DeleteNotificationChannelRequest) Reset() {
+ *x = DeleteNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteNotificationChannelRequest) ProtoMessage() {}
+
+func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *DeleteNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteNotificationChannelRequest) GetForce() bool {
+ if x != nil {
+ return x.Force
+ }
+ return false
+}
+
+// The `SendNotificationChannelVerificationCode` request.
+type SendNotificationChannelVerificationCodeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel to which to send a verification code.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) Reset() {
+ *x = SendNotificationChannelVerificationCodeRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+
+func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
+func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel for which a verification code is to be
+ // generated and retrieved. This must name a channel that is already verified;
+ // if the specified channel is not verified, the request will fail.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The desired expiration time. If specified, the API will guarantee that
+ // the returned code will not be valid after the specified timestamp;
+ // however, the API cannot guarantee that the returned code will be
+ // valid for at least as long as the requested time (the API puts an upper
+ // bound on the amount of time for which a code may be valid). If omitted,
+ // a default expiration will be used, which may be less than the max
+ // permissible expiration (so specifying an expiration may extend the
+ // code's lifetime over omitting an expiration, even though the API does
+ // impose an upper limit on the maximum expiration that is permitted).
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) Reset() {
+ *x = GetNotificationChannelVerificationCodeRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The verification code, which may be used to verify other channels
+ // that have an equivalent identity (i.e. other channels of the same
+ // type with the same fingerprint such as other email channels with
+ // the same email address or other sms channels with the same number).
+ Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
+ // The expiration time associated with the code that was returned. If
+ // an expiration was provided in the request, this is the minimum of the
+ // requested expiration in the request and the max permitted expiration.
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) Reset() {
+ *x = GetNotificationChannelVerificationCodeResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {}
+
+func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string {
+ if x != nil {
+ return x.Code
+ }
+ return ""
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+// The `VerifyNotificationChannel` request.
+type VerifyNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel to verify.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The verification code that was delivered to the channel as
+ // a result of invoking the `SendNotificationChannelVerificationCode` API
+ // method or that was retrieved from a verified channel via
+ // `GetNotificationChannelVerificationCode`. For example, one might have
+ // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
+ // guaranteed that the code is valid UTF-8; one should not
+ // make any assumptions regarding the structure or format of the code).
+ Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
+}
+
+func (x *VerifyNotificationChannelRequest) Reset() {
+ *x = VerifyNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VerifyNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VerifyNotificationChannelRequest) ProtoMessage() {}
+
+func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *VerifyNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *VerifyNotificationChannelRequest) GetCode() string {
+ if x != nil {
+ return x.Code
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0x7e, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39,
+ 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
+ 0xd0, 0x01, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x22, 0xef, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e,
+ 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x20,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc9, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
+ 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65,
+ 0x22, 0x6a, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x01, 0x0a,
+ 0x20, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
+ 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
+ 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e,
+ 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65,
+ 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
+ 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
+ 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xdd,
+ 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc4,
+ 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12,
+ 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01,
+ 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64,
+ 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x42, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x83, 0x02, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0xda, 0x41, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x59, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x32, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f,
+ 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xdc, 0x01, 0x0a, 0x27,
+ 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47,
+ 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x52, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a,
+ 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x3a, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x64, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63,
+ 0x6f, 0x64, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
+ 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd3, 0x01,
+ 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_notification_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_google_monitoring_v3_notification_service_proto_goTypes = []any{
+ (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest
+ (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse
+ (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest
+ (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest
+ (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest
+ (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse
+ (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest
+ (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest
+ (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest
+ (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
+ (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
+ (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
+ (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest
+ (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor
+ (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel
+ (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask
+ (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+}
+var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{
+ 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor
+ 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
+ 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel
+ 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
+ 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp
+ 16, // 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp
+ 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest
+ 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest
+ 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest
+ 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest
+ 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest
+ 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest
+ 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest
+ 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
+ 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
+ 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest
+ 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse
+ 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor
+ 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse
+ 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty
+ 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty
+ 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
+ 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 17, // [17:27] is the sub-list for method output_type
+ 7, // [7:17] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_notification_service_proto_init() }
+func file_google_monitoring_v3_notification_service_proto_init() {
+ if File_google_monitoring_v3_notification_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_notification_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_notification_service_proto = out.File
+ file_google_monitoring_v3_notification_service_proto_rawDesc = nil
+ file_google_monitoring_v3_notification_service_proto_goTypes = nil
+ file_google_monitoring_v3_notification_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// NotificationChannelServiceClient is the client API for NotificationChannelService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NotificationChannelServiceClient interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ // To list the types of notification channels that are supported, use
+ // the `ListNotificationChannelDescriptors` method.
+ ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or PagerDuty service.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+}
+
+type notificationChannelServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient {
+ return ¬ificationChannelServiceClient{cc}
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
+ out := new(ListNotificationChannelDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) {
+ out := new(NotificationChannelDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) {
+ out := new(ListNotificationChannelsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
+ out := new(GetNotificationChannelVerificationCodeResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NotificationChannelServiceServer is the server API for NotificationChannelService service.
+type NotificationChannelServiceServer interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ // To list the types of notification channels that are supported, use
+ // the `ListNotificationChannelDescriptors` method.
+ ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or PagerDuty service.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error)
+}
+
+// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedNotificationChannelServiceServer struct {
+}
+
+func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented")
+}
+
+func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) {
+ s.RegisterService(&_NotificationChannelService_serviceDesc, srv)
+}
+
+func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VerifyNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.NotificationChannelService",
+ HandlerType: (*NotificationChannelServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListNotificationChannelDescriptors",
+ Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelDescriptor",
+ Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler,
+ },
+ {
+ MethodName: "ListNotificationChannels",
+ Handler: _NotificationChannelService_ListNotificationChannels_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannel",
+ Handler: _NotificationChannelService_GetNotificationChannel_Handler,
+ },
+ {
+ MethodName: "CreateNotificationChannel",
+ Handler: _NotificationChannelService_CreateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "UpdateNotificationChannel",
+ Handler: _NotificationChannelService_UpdateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "DeleteNotificationChannel",
+ Handler: _NotificationChannelService_DeleteNotificationChannel_Handler,
+ },
+ {
+ MethodName: "SendNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "VerifyNotificationChannel",
+ Handler: _NotificationChannelService_VerifyNotificationChannel_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/notification_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
new file mode 100644
index 000000000..6402f18ca
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
@@ -0,0 +1,221 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/query_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x32, 0xe1, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22,
+ 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x88, 0x02, 0x01, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76,
+ 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_google_monitoring_v3_query_service_proto_goTypes = []any{
+ (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest
+ (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse
+}
+var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{
+ 0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest
+ 1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_query_service_proto_init() }
+func file_google_monitoring_v3_query_service_proto_init() {
+ if File_google_monitoring_v3_query_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_metric_service_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_query_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs,
+ }.Build()
+ File_google_monitoring_v3_query_service_proto = out.File
+ file_google_monitoring_v3_query_service_proto_rawDesc = nil
+ file_google_monitoring_v3_query_service_proto_goTypes = nil
+ file_google_monitoring_v3_query_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// QueryServiceClient is the client API for QueryService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryServiceClient interface {
+ // Deprecated: Do not use.
+ // Queries time series by using Monitoring Query Language (MQL). We recommend
+ // using PromQL instead of MQL. For more information about the status of MQL,
+ // see the [MQL deprecation
+ // notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+ QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error)
+}
+
+type queryServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient {
+ return &queryServiceClient{cc}
+}
+
+// Deprecated: Do not use.
+func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) {
+ out := new(QueryTimeSeriesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServiceServer is the server API for QueryService service.
+type QueryServiceServer interface {
+ // Deprecated: Do not use.
+ // Queries time series by using Monitoring Query Language (MQL). We recommend
+ // using PromQL instead of MQL. For more information about the status of MQL,
+ // see the [MQL deprecation
+ // notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+ QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error)
+}
+
+// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServiceServer struct {
+}
+
+func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented")
+}
+
+func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) {
+ s.RegisterService(&_QueryService_serviceDesc, srv)
+}
+
+func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServiceServer).QueryTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _QueryService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.QueryService",
+ HandlerType: (*QueryServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "QueryTimeSeries",
+ Handler: _QueryService_QueryTimeSeries_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/query_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
new file mode 100644
index 000000000..a9d2ae8cb
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
@@ -0,0 +1,2755 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/service.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// `ServiceLevelObjective.View` determines what form of
+// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`,
+// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs.
+type ServiceLevelObjective_View int32
+
+const (
+ // Same as FULL.
+ ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0
+ // Return the embedded `ServiceLevelIndicator` in the form in which it was
+ // defined. If it was defined using a `BasicSli`, return that `BasicSli`.
+ ServiceLevelObjective_FULL ServiceLevelObjective_View = 2
+ // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead
+ // return the `ServiceLevelIndicator` with its mode of computation fully
+ // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using
+ // `RequestBasedSli` or `WindowsBasedSli`, return the
+ // `ServiceLevelIndicator` as it was provided.
+ ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1
+)
+
+// Enum value maps for ServiceLevelObjective_View.
+var (
+ ServiceLevelObjective_View_name = map[int32]string{
+ 0: "VIEW_UNSPECIFIED",
+ 2: "FULL",
+ 1: "EXPLICIT",
+ }
+ ServiceLevelObjective_View_value = map[string]int32{
+ "VIEW_UNSPECIFIED": 0,
+ "FULL": 2,
+ "EXPLICIT": 1,
+ }
+)
+
+func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View {
+ p := new(ServiceLevelObjective_View)
+ *p = x
+ return p
+}
+
+func (x ServiceLevelObjective_View) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor()
+}
+
+func (ServiceLevelObjective_View) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_service_proto_enumTypes[0]
+}
+
+func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceLevelObjective_View.Descriptor instead.
+func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// A `Service` is a discrete, autonomous, and network-accessible unit, designed
+// to solve an individual concern
+// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In
+// Cloud Monitoring, a `Service` acts as the root resource under which
+// operational aspects of the service are accessible.
+type Service struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Resource name for this Service. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Name used for UI elements listing this Service.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // REQUIRED. Service-identifying atoms specifying the underlying service.
+ //
+ // Types that are assignable to Identifier:
+ //
+ // *Service_Custom_
+ // *Service_AppEngine_
+ // *Service_CloudEndpoints_
+ // *Service_ClusterIstio_
+ // *Service_MeshIstio_
+ // *Service_IstioCanonicalService_
+ // *Service_CloudRun_
+ // *Service_GkeNamespace_
+ // *Service_GkeWorkload_
+ // *Service_GkeService_
+ Identifier isService_Identifier `protobuf_oneof:"identifier"`
+ // Message that contains the service type and service labels of this service
+ // if it is a basic service.
+ // Documentation and examples
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"`
+ // Configuration for how to query telemetry on a Service.
+ Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"`
+ // Labels which have been used to annotate the service. Label keys must start
+ // with a letter. Label keys and values may contain lowercase letters,
+ // numbers, underscores, and dashes. Label keys and values have a maximum
+ // length of 63 characters, and must be less than 128 bytes in size. Up to 64
+ // label entries may be stored. For labels which do not have a semantic value,
+ // the empty string may be supplied for the label value.
+ UserLabels map[string]string `protobuf:"bytes,14,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Service) Reset() {
+ *x = Service{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service) ProtoMessage() {}
+
+func (x *Service) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service.ProtoReflect.Descriptor instead.
+func (*Service) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Service) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Service) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *Service) GetIdentifier() isService_Identifier {
+ if m != nil {
+ return m.Identifier
+ }
+ return nil
+}
+
+func (x *Service) GetCustom() *Service_Custom {
+ if x, ok := x.GetIdentifier().(*Service_Custom_); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *Service) GetAppEngine() *Service_AppEngine {
+ if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok {
+ return x.AppEngine
+ }
+ return nil
+}
+
+func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints {
+ if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok {
+ return x.CloudEndpoints
+ }
+ return nil
+}
+
+func (x *Service) GetClusterIstio() *Service_ClusterIstio {
+ if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok {
+ return x.ClusterIstio
+ }
+ return nil
+}
+
+func (x *Service) GetMeshIstio() *Service_MeshIstio {
+ if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok {
+ return x.MeshIstio
+ }
+ return nil
+}
+
+func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService {
+ if x, ok := x.GetIdentifier().(*Service_IstioCanonicalService_); ok {
+ return x.IstioCanonicalService
+ }
+ return nil
+}
+
+func (x *Service) GetCloudRun() *Service_CloudRun {
+ if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok {
+ return x.CloudRun
+ }
+ return nil
+}
+
+func (x *Service) GetGkeNamespace() *Service_GkeNamespace {
+ if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok {
+ return x.GkeNamespace
+ }
+ return nil
+}
+
+func (x *Service) GetGkeWorkload() *Service_GkeWorkload {
+ if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok {
+ return x.GkeWorkload
+ }
+ return nil
+}
+
+func (x *Service) GetGkeService() *Service_GkeService {
+ if x, ok := x.GetIdentifier().(*Service_GkeService_); ok {
+ return x.GkeService
+ }
+ return nil
+}
+
+func (x *Service) GetBasicService() *Service_BasicService {
+ if x != nil {
+ return x.BasicService
+ }
+ return nil
+}
+
+func (x *Service) GetTelemetry() *Service_Telemetry {
+ if x != nil {
+ return x.Telemetry
+ }
+ return nil
+}
+
+func (x *Service) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isService_Identifier interface {
+ isService_Identifier()
+}
+
+type Service_Custom_ struct {
+ // Custom service type.
+ Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"`
+}
+
+type Service_AppEngine_ struct {
+ // Type used for App Engine services.
+ AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"`
+}
+
+type Service_CloudEndpoints_ struct {
+ // Type used for Cloud Endpoints services.
+ CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"`
+}
+
+type Service_ClusterIstio_ struct {
+ // Type used for Istio services that live in a Kubernetes cluster.
+ ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"`
+}
+
+type Service_MeshIstio_ struct {
+ // Type used for Istio services scoped to an Istio mesh.
+ MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"`
+}
+
+type Service_IstioCanonicalService_ struct {
+ // Type used for canonical services scoped to an Istio mesh.
+ // Metrics for Istio are
+ // [documented here](https://istio.io/latest/docs/reference/config/metrics/)
+ IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"`
+}
+
+type Service_CloudRun_ struct {
+ // Type used for Cloud Run services.
+ CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"`
+}
+
+type Service_GkeNamespace_ struct {
+ // Type used for GKE Namespaces.
+ GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"`
+}
+
+type Service_GkeWorkload_ struct {
+ // Type used for GKE Workloads.
+ GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"`
+}
+
+type Service_GkeService_ struct {
+ // Type used for GKE Services (the Kubernetes concept of a service).
+ GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"`
+}
+
+func (*Service_Custom_) isService_Identifier() {}
+
+func (*Service_AppEngine_) isService_Identifier() {}
+
+func (*Service_CloudEndpoints_) isService_Identifier() {}
+
+func (*Service_ClusterIstio_) isService_Identifier() {}
+
+func (*Service_MeshIstio_) isService_Identifier() {}
+
+func (*Service_IstioCanonicalService_) isService_Identifier() {}
+
+func (*Service_CloudRun_) isService_Identifier() {}
+
+func (*Service_GkeNamespace_) isService_Identifier() {}
+
+func (*Service_GkeWorkload_) isService_Identifier() {}
+
+func (*Service_GkeService_) isService_Identifier() {}
+
+// A Service-Level Objective (SLO) describes a level of desired good service. It
+// consists of a service-level indicator (SLI), a performance goal, and a period
+// over which the objective is to be evaluated against that goal. The SLO can
+// use SLIs defined in a number of different manners. Typical SLOs might include
+// "99% of requests in each rolling week have latency below 200 milliseconds" or
+// "99.5% of requests in each calendar month return successfully."
+type ServiceLevelObjective struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Resource name for this `ServiceLevelObjective`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Name used for UI elements listing this SLO.
+ DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The definition of good service, used to measure and calculate the quality
+ // of the `Service`'s performance with respect to a single aspect of service
+ // quality.
+ ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"`
+ // The fraction of service that must be good in order for this objective to be
+ // met. `0 < goal <= 0.9999`.
+ Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"`
+ // The time period over which the objective will be evaluated.
+ //
+ // Types that are assignable to Period:
+ //
+ // *ServiceLevelObjective_RollingPeriod
+ // *ServiceLevelObjective_CalendarPeriod
+ Period isServiceLevelObjective_Period `protobuf_oneof:"period"`
+ // Labels which have been used to annotate the service-level objective. Label
+ // keys must start with a letter. Label keys and values may contain lowercase
+ // letters, numbers, underscores, and dashes. Label keys and values have a
+ // maximum length of 63 characters, and must be less than 128 bytes in size.
+ // Up to 64 label entries may be stored. For labels which do not have a
+ // semantic value, the empty string may be supplied for the label value.
+ UserLabels map[string]string `protobuf:"bytes,12,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ServiceLevelObjective) Reset() {
+ *x = ServiceLevelObjective{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceLevelObjective) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceLevelObjective) ProtoMessage() {}
+
+func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead.
+func (*ServiceLevelObjective) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ServiceLevelObjective) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ServiceLevelObjective) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator {
+ if x != nil {
+ return x.ServiceLevelIndicator
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetGoal() float64 {
+ if x != nil {
+ return x.Goal
+ }
+ return 0
+}
+
+func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period {
+ if m != nil {
+ return m.Period
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration {
+ if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok {
+ return x.RollingPeriod
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod {
+ if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok {
+ return x.CalendarPeriod
+ }
+ return calendarperiod.CalendarPeriod(0)
+}
+
+func (x *ServiceLevelObjective) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isServiceLevelObjective_Period interface {
+ isServiceLevelObjective_Period()
+}
+
+type ServiceLevelObjective_RollingPeriod struct {
+ // A rolling time period, semantically "in the past ``".
+ // Must be an integer multiple of 1 day no larger than 30 days.
+ RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"`
+}
+
+type ServiceLevelObjective_CalendarPeriod struct {
+ // A calendar period, semantically "since the start of the current
+ // ``". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and
+ // `MONTH` are supported.
+ CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"`
+}
+
+func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {}
+
+func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {}
+
+// A Service-Level Indicator (SLI) describes the "performance" of a service. For
+// some services, the SLI is well-defined. In such cases, the SLI can be
+// described easily by referencing the well-known SLI and providing the needed
+// parameters. Alternatively, a "custom" SLI can be defined with a query to the
+// underlying metric store. An SLI is defined to be `good_service /
+// total_service` over any queried time interval. The value of performance
+// always falls into the range `0 <= performance <= 1`. A custom SLI describes
+// how to compute this ratio, whether this is by dividing values from a pair of
+// time series, cutting a `Distribution` into good and bad counts, or counting
+// time windows in which the service complies with a criterion. For separation
+// of concerns, a single Service-Level Indicator measures performance for only
+// one aspect of service quality, such as fraction of successful queries or
+// fast-enough queries.
+type ServiceLevelIndicator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Service level indicators can be grouped by whether the "unit" of service
+ // being measured is based on counts of good requests or on counts of good
+ // time windows
+ //
+ // Types that are assignable to Type:
+ //
+ // *ServiceLevelIndicator_BasicSli
+ // *ServiceLevelIndicator_RequestBased
+ // *ServiceLevelIndicator_WindowsBased
+ Type isServiceLevelIndicator_Type `protobuf_oneof:"type"`
+}
+
+func (x *ServiceLevelIndicator) Reset() {
+ *x = ServiceLevelIndicator{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceLevelIndicator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceLevelIndicator) ProtoMessage() {}
+
+func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead.
+func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok {
+ return x.BasicSli
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok {
+ return x.RequestBased
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok {
+ return x.WindowsBased
+ }
+ return nil
+}
+
+type isServiceLevelIndicator_Type interface {
+ isServiceLevelIndicator_Type()
+}
+
+type ServiceLevelIndicator_BasicSli struct {
+ // Basic SLI on a well-known service type.
+ BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"`
+}
+
+type ServiceLevelIndicator_RequestBased struct {
+ // Request-based SLIs
+ RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"`
+}
+
+type ServiceLevelIndicator_WindowsBased struct {
+ // Windows-based SLIs
+ WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"`
+}
+
+func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {}
+
+func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {}
+
+func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {}
+
+// An SLI measuring performance on a well-known service type. Performance will
+// be computed on the basis of pre-defined metrics. The type of the
+// `service_resource` determines the metrics to use and the
+// `service_resource.labels` and `metric_labels` are used to construct a
+// monitoring filter to filter that metric down to just the data relevant to
+// this service.
+type BasicSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from
+ // other methods will not be used to calculate performance for this SLI. If
+ // omitted, this SLI applies to all the Service's methods. For service types
+ // that don't support breaking down by method, setting this field will result
+ // in an error.
+ Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"`
+ // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry
+ // from other locations will not be used to calculate performance for this
+ // SLI. If omitted, this SLI applies to all locations in which the Service has
+ // activity. For service types that don't support breaking down by location,
+ // setting this field will result in an error.
+ Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"`
+ // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry
+ // from other API versions will not be used to calculate performance for this
+ // SLI. If omitted, this SLI applies to all API versions. For service types
+ // that don't support breaking down by version, setting this field will result
+ // in an error.
+ Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"`
+ // This SLI can be evaluated on the basis of availability or latency.
+ //
+ // Types that are assignable to SliCriteria:
+ //
+ // *BasicSli_Availability
+ // *BasicSli_Latency
+ SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"`
+}
+
+func (x *BasicSli) Reset() {
+ *x = BasicSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli) ProtoMessage() {}
+
+func (x *BasicSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead.
+func (*BasicSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *BasicSli) GetMethod() []string {
+ if x != nil {
+ return x.Method
+ }
+ return nil
+}
+
+func (x *BasicSli) GetLocation() []string {
+ if x != nil {
+ return x.Location
+ }
+ return nil
+}
+
+func (x *BasicSli) GetVersion() []string {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria {
+ if m != nil {
+ return m.SliCriteria
+ }
+ return nil
+}
+
+func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria {
+ if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok {
+ return x.Availability
+ }
+ return nil
+}
+
+func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria {
+ if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok {
+ return x.Latency
+ }
+ return nil
+}
+
+type isBasicSli_SliCriteria interface {
+ isBasicSli_SliCriteria()
+}
+
+type BasicSli_Availability struct {
+ // Good service is defined to be the count of requests made to this service
+ // that return successfully.
+ Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"`
+}
+
+type BasicSli_Latency struct {
+ // Good service is defined to be the count of requests made to this service
+ // that are fast enough with respect to `latency.threshold`.
+ Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"`
+}
+
+func (*BasicSli_Availability) isBasicSli_SliCriteria() {}
+
+func (*BasicSli_Latency) isBasicSli_SliCriteria() {}
+
+// Range of numerical values within `min` and `max`.
+type Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Range minimum.
+ Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"`
+ // Range maximum.
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"`
+}
+
+func (x *Range) Reset() {
+ *x = Range{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Range) ProtoMessage() {}
+
+func (x *Range) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Range.ProtoReflect.Descriptor instead.
+func (*Range) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Range) GetMin() float64 {
+ if x != nil {
+ return x.Min
+ }
+ return 0
+}
+
+func (x *Range) GetMax() float64 {
+ if x != nil {
+ return x.Max
+ }
+ return 0
+}
+
+// Service Level Indicators for which atomic units of service are counted
+// directly.
+type RequestBasedSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The means to compute a ratio of `good_service` to `total_service`.
+ //
+ // Types that are assignable to Method:
+ //
+ // *RequestBasedSli_GoodTotalRatio
+ // *RequestBasedSli_DistributionCut
+ Method isRequestBasedSli_Method `protobuf_oneof:"method"`
+}
+
+func (x *RequestBasedSli) Reset() {
+ *x = RequestBasedSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *RequestBasedSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestBasedSli) ProtoMessage() {}
+
+func (x *RequestBasedSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead.
+func (*RequestBasedSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio {
+ if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok {
+ return x.GoodTotalRatio
+ }
+ return nil
+}
+
+func (x *RequestBasedSli) GetDistributionCut() *DistributionCut {
+ if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok {
+ return x.DistributionCut
+ }
+ return nil
+}
+
+type isRequestBasedSli_Method interface {
+ isRequestBasedSli_Method()
+}
+
+type RequestBasedSli_GoodTotalRatio struct {
+ // `good_total_ratio` is used when the ratio of `good_service` to
+ // `total_service` is computed from two `TimeSeries`.
+ GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"`
+}
+
+type RequestBasedSli_DistributionCut struct {
+ // `distribution_cut` is used when `good_service` is a count of values
+ // aggregated in a `Distribution` that fall into a good range. The
+ // `total_service` is the total count of all values aggregated in the
+ // `Distribution`.
+ DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"`
+}
+
+func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {}
+
+func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {}
+
+// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the
+// `good_service / total_service` ratio. The specified `TimeSeries` must have
+// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify
+// exactly two of good, bad, and total, and the relationship `good_service +
+// bad_service = total_service` will be assumed.
+type TimeSeriesRatio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying good service provided. Must have
+ // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+ // DELTA` or `MetricKind = CUMULATIVE`.
+ GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"`
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying bad service, either demanded service
+ // that was not provided or demanded service that was of inadequate quality.
+ // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have
+ // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.
+ BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"`
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying total demanded service. Must have
+ // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+ // DELTA` or `MetricKind = CUMULATIVE`.
+ TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"`
+}
+
+func (x *TimeSeriesRatio) Reset() {
+ *x = TimeSeriesRatio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesRatio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesRatio) ProtoMessage() {}
+
+func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead.
+func (*TimeSeriesRatio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *TimeSeriesRatio) GetGoodServiceFilter() string {
+ if x != nil {
+ return x.GoodServiceFilter
+ }
+ return ""
+}
+
+func (x *TimeSeriesRatio) GetBadServiceFilter() string {
+ if x != nil {
+ return x.BadServiceFilter
+ }
+ return ""
+}
+
+func (x *TimeSeriesRatio) GetTotalServiceFilter() string {
+ if x != nil {
+ return x.TotalServiceFilter
+ }
+ return ""
+}
+
+// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring
+// good service and total service. The `TimeSeries` must have `ValueType =
+// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The
+// computed `good_service` will be the estimated count of values in the
+// `Distribution` that fall within the specified `min` and `max`.
+type DistributionCut struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` aggregating values. Must have `ValueType =
+ // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.
+ DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"`
+ // Range of values considered "good." For a one-sided range, set one bound to
+ // an infinite value.
+ Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *DistributionCut) Reset() {
+ *x = DistributionCut{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DistributionCut) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionCut) ProtoMessage() {}
+
+func (x *DistributionCut) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead.
+func (*DistributionCut) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DistributionCut) GetDistributionFilter() string {
+ if x != nil {
+ return x.DistributionFilter
+ }
+ return ""
+}
+
+func (x *DistributionCut) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+// A `WindowsBasedSli` defines `good_service` as the count of time windows for
+// which the provided service was of good quality. Criteria for determining
+// if service was good are embedded in the `window_criterion`.
+type WindowsBasedSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The criterion to use for evaluating window goodness.
+ //
+ // Types that are assignable to WindowCriterion:
+ //
+ // *WindowsBasedSli_GoodBadMetricFilter
+ // *WindowsBasedSli_GoodTotalRatioThreshold
+ // *WindowsBasedSli_MetricMeanInRange
+ // *WindowsBasedSli_MetricSumInRange
+ WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"`
+ // Duration over which window quality is evaluated. Must be an integer
+ // fraction of a day and at least `60s`.
+ WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"`
+}
+
+func (x *WindowsBasedSli) Reset() {
+ *x = WindowsBasedSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli) ProtoMessage() {}
+
+func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion {
+ if m != nil {
+ return m.WindowCriterion
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetGoodBadMetricFilter() string {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok {
+ return x.GoodBadMetricFilter
+ }
+ return ""
+}
+
+func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok {
+ return x.GoodTotalRatioThreshold
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok {
+ return x.MetricMeanInRange
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok {
+ return x.MetricSumInRange
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.WindowPeriod
+ }
+ return nil
+}
+
+type isWindowsBasedSli_WindowCriterion interface {
+ isWindowsBasedSli_WindowCriterion()
+}
+
+type WindowsBasedSli_GoodBadMetricFilter struct {
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if
+ // any `true` values appear in the window.
+ GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"`
+}
+
+type WindowsBasedSli_GoodTotalRatioThreshold struct {
+ // A window is good if its `performance` is high enough.
+ GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"`
+}
+
+type WindowsBasedSli_MetricMeanInRange struct {
+ // A window is good if the metric's value is in a good range, averaged
+ // across returned streams.
+ MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"`
+}
+
+type WindowsBasedSli_MetricSumInRange struct {
+ // A window is good if the metric's value is in a good range, summed across
+ // returned streams.
+ MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"`
+}
+
+func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {}
+
+// Use a custom service to designate a service that you want to monitor
+// when none of the other service types (like App Engine, Cloud Run, or
+// a GKE type) matches your intended service.
+type Service_Custom struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Service_Custom) Reset() {
+ *x = Service_Custom{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_Custom) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_Custom) ProtoMessage() {}
+
+func (x *Service_Custom) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead.
+func (*Service_Custom) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// App Engine service. Learn more at https://cloud.google.com/appengine.
+type Service_AppEngine struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ID of the App Engine module underlying this service. Corresponds to
+ // the `module_id` resource label in the [`gae_app` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app).
+ ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"`
+}
+
+func (x *Service_AppEngine) Reset() {
+ *x = Service_AppEngine{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_AppEngine) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_AppEngine) ProtoMessage() {}
+
+func (x *Service_AppEngine) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead.
+func (*Service_AppEngine) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Service_AppEngine) GetModuleId() string {
+ if x != nil {
+ return x.ModuleId
+ }
+ return ""
+}
+
+// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints.
+type Service_CloudEndpoints struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the Cloud Endpoints service underlying this service.
+ // Corresponds to the `service` resource label in the [`api` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_api).
+ Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *Service_CloudEndpoints) Reset() {
+ *x = Service_CloudEndpoints{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_CloudEndpoints) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_CloudEndpoints) ProtoMessage() {}
+
+func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead.
+func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Service_CloudEndpoints) GetService() string {
+ if x != nil {
+ return x.Service
+ }
+ return ""
+}
+
+// Istio service scoped to a single Kubernetes cluster. Learn more at
+// https://istio.io. Clusters running OSS Istio will have their services
+// ingested as this type.
+type Service_ClusterIstio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The location of the Kubernetes cluster in which this Istio service is
+ // defined. Corresponds to the `location` resource label in `k8s_cluster`
+ // resources.
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the Kubernetes cluster in which this Istio service is
+ // defined. Corresponds to the `cluster_name` resource label in
+ // `k8s_cluster` resources.
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The namespace of the Istio service underlying this service. Corresponds
+ // to the `destination_service_namespace` metric label in Istio metrics.
+ ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"`
+ // The name of the Istio service underlying this service. Corresponds to the
+ // `destination_service_name` metric label in Istio metrics.
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_ClusterIstio) Reset() {
+ *x = Service_ClusterIstio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_ClusterIstio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_ClusterIstio) ProtoMessage() {}
+
+func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead.
+func (*Service_ClusterIstio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *Service_ClusterIstio) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetServiceNamespace() string {
+ if x != nil {
+ return x.ServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8
+// will have their services ingested as this type.
+type Service_MeshIstio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier for the mesh in which this Istio service is defined.
+ // Corresponds to the `mesh_uid` metric label in Istio metrics.
+ MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"`
+ // The namespace of the Istio service underlying this service. Corresponds
+ // to the `destination_service_namespace` metric label in Istio metrics.
+ ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"`
+ // The name of the Istio service underlying this service. Corresponds to the
+ // `destination_service_name` metric label in Istio metrics.
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_MeshIstio) Reset() {
+ *x = Service_MeshIstio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_MeshIstio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_MeshIstio) ProtoMessage() {}
+
+func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead.
+func (*Service_MeshIstio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *Service_MeshIstio) GetMeshUid() string {
+ if x != nil {
+ return x.MeshUid
+ }
+ return ""
+}
+
+func (x *Service_MeshIstio) GetServiceNamespace() string {
+ if x != nil {
+ return x.ServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_MeshIstio) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// Canonical service scoped to an Istio mesh. Anthos clusters running ASM >=
+// 1.6.8 will have their services ingested as this type.
+type Service_IstioCanonicalService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier for the Istio mesh in which this canonical service is defined.
+ // Corresponds to the `mesh_uid` metric label in
+ // [Istio metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"`
+ // The namespace of the canonical service underlying this service.
+ // Corresponds to the `destination_canonical_service_namespace` metric
+ // label in [Istio
+ // metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ CanonicalServiceNamespace string `protobuf:"bytes,3,opt,name=canonical_service_namespace,json=canonicalServiceNamespace,proto3" json:"canonical_service_namespace,omitempty"`
+ // The name of the canonical service underlying this service.
+ // Corresponds to the `destination_canonical_service_name` metric label in
+ // label in [Istio
+ // metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ CanonicalService string `protobuf:"bytes,4,opt,name=canonical_service,json=canonicalService,proto3" json:"canonical_service,omitempty"`
+}
+
+func (x *Service_IstioCanonicalService) Reset() {
+ *x = Service_IstioCanonicalService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_IstioCanonicalService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_IstioCanonicalService) ProtoMessage() {}
+
+func (x *Service_IstioCanonicalService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_IstioCanonicalService.ProtoReflect.Descriptor instead.
+func (*Service_IstioCanonicalService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5}
+}
+
+func (x *Service_IstioCanonicalService) GetMeshUid() string {
+ if x != nil {
+ return x.MeshUid
+ }
+ return ""
+}
+
+func (x *Service_IstioCanonicalService) GetCanonicalServiceNamespace() string {
+ if x != nil {
+ return x.CanonicalServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_IstioCanonicalService) GetCanonicalService() string {
+ if x != nil {
+ return x.CanonicalService
+ }
+ return ""
+}
+
+// Cloud Run service. Learn more at https://cloud.google.com/run.
+type Service_CloudRun struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the Cloud Run service. Corresponds to the `service_name`
+ // resource label in the [`cloud_run_revision` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ // The location the service is run. Corresponds to the `location`
+ // resource label in the [`cloud_run_revision` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+}
+
+func (x *Service_CloudRun) Reset() {
+ *x = Service_CloudRun{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_CloudRun) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_CloudRun) ProtoMessage() {}
+
+func (x *Service_CloudRun) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead.
+func (*Service_CloudRun) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6}
+}
+
+func (x *Service_CloudRun) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+func (x *Service_CloudRun) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+// GKE Namespace. The field names correspond to the resource metadata labels
+// on monitored resources that fall under a namespace (for example,
+// `k8s_container` or `k8s_pod`).
+type Service_GkeNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of this namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+}
+
+func (x *Service_GkeNamespace) Reset() {
+ *x = Service_GkeNamespace{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeNamespace) ProtoMessage() {}
+
+func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead.
+func (*Service_GkeNamespace) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7}
+}
+
+func (x *Service_GkeNamespace) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond
+// to the metadata labels on monitored resources that fall under a workload
+// (for example, `k8s_container` or `k8s_pod`).
+type Service_GkeWorkload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of the parent namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+ // The type of this workload (for example, "Deployment" or "DaemonSet")
+ TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"`
+ // The name of this workload.
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"`
+}
+
+func (x *Service_GkeWorkload) Reset() {
+ *x = Service_GkeWorkload{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeWorkload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeWorkload) ProtoMessage() {}
+
+func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead.
+func (*Service_GkeWorkload) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8}
+}
+
+func (x *Service_GkeWorkload) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetTopLevelControllerType() string {
+ if x != nil {
+ return x.TopLevelControllerType
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetTopLevelControllerName() string {
+ if x != nil {
+ return x.TopLevelControllerName
+ }
+ return ""
+}
+
+// GKE Service. The "service" here represents a
+// [Kubernetes service
+// object](https://kubernetes.io/docs/concepts/services-networking/service).
+// The field names correspond to the resource labels on [`k8s_service`
+// monitored
+// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service).
+type Service_GkeService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of the parent namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+ // The name of this service.
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_GkeService) Reset() {
+ *x = Service_GkeService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeService) ProtoMessage() {}
+
+func (x *Service_GkeService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead.
+func (*Service_GkeService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9}
+}
+
+func (x *Service_GkeService) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// A well-known service type, defined by its service type and service labels.
+// Documentation and examples
+// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+type Service_BasicService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of service that this basic service defines, e.g.
+ // APP_ENGINE service type.
+ // Documentation and valid values
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
+ // Labels that specify the resource that emits the monitoring data which
+ // is used for SLO reporting of this `Service`.
+ // Documentation and valid values for given service types
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Service_BasicService) Reset() {
+ *x = Service_BasicService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_BasicService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_BasicService) ProtoMessage() {}
+
+func (x *Service_BasicService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[19]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead.
+func (*Service_BasicService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10}
+}
+
+func (x *Service_BasicService) GetServiceType() string {
+ if x != nil {
+ return x.ServiceType
+ }
+ return ""
+}
+
+func (x *Service_BasicService) GetServiceLabels() map[string]string {
+ if x != nil {
+ return x.ServiceLabels
+ }
+ return nil
+}
+
+// Configuration for how to query telemetry on a Service.
+type Service_Telemetry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full name of the resource that defines this service. Formatted as
+ // described in https://cloud.google.com/apis/design/resource_names.
+ ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+}
+
+func (x *Service_Telemetry) Reset() {
+ *x = Service_Telemetry{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_Telemetry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_Telemetry) ProtoMessage() {}
+
+func (x *Service_Telemetry) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead.
+func (*Service_Telemetry) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11}
+}
+
+func (x *Service_Telemetry) GetResourceName() string {
+ if x != nil {
+ return x.ResourceName
+ }
+ return ""
+}
+
+// Future parameters for the availability SLI.
+type BasicSli_AvailabilityCriteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *BasicSli_AvailabilityCriteria) Reset() {
+ *x = BasicSli_AvailabilityCriteria{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli_AvailabilityCriteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli_AvailabilityCriteria) ProtoMessage() {}
+
+func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[24]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead.
+func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// Parameters for a latency threshold SLI.
+type BasicSli_LatencyCriteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Good service is defined to be the count of requests made to this service
+ // that return in no more than `threshold`.
+ Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *BasicSli_LatencyCriteria) Reset() {
+ *x = BasicSli_LatencyCriteria{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli_LatencyCriteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli_LatencyCriteria) ProtoMessage() {}
+
+func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[25]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead.
+func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration {
+ if x != nil {
+ return x.Threshold
+ }
+ return nil
+}
+
+// A `PerformanceThreshold` is used when each window is good when that window
+// has a sufficiently high `performance`.
+type WindowsBasedSli_PerformanceThreshold struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The means, either a request-based SLI or a basic SLI, by which to compute
+ // performance over a window.
+ //
+ // Types that are assignable to Type:
+ //
+ // *WindowsBasedSli_PerformanceThreshold_Performance
+ // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance
+ Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"`
+ // If window `performance >= threshold`, the window is counted as good.
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) Reset() {
+ *x = WindowsBasedSli_PerformanceThreshold{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {}
+
+func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[26]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli {
+ if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok {
+ return x.Performance
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli {
+ if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok {
+ return x.BasicSliPerformance
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+type isWindowsBasedSli_PerformanceThreshold_Type interface {
+ isWindowsBasedSli_PerformanceThreshold_Type()
+}
+
+type WindowsBasedSli_PerformanceThreshold_Performance struct {
+ // `RequestBasedSli` to evaluate to judge window quality.
+ Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"`
+}
+
+type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct {
+ // `BasicSli` to evaluate to judge window quality.
+ BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"`
+}
+
+func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() {
+}
+
+func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() {
+}
+
+// A `MetricRange` is used when each window is good when the value x of a
+// single `TimeSeries` satisfies `range.min <= x <= range.max`. The provided
+// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and
+// `MetricKind = GAUGE`.
+type WindowsBasedSli_MetricRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying the `TimeSeries` to use for evaluating window quality.
+ TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // Range of values considered "good." For a one-sided range, set one bound
+ // to an infinite value.
+ Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *WindowsBasedSli_MetricRange) Reset() {
+ *x = WindowsBasedSli_MetricRange{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli_MetricRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli_MetricRange) ProtoMessage() {}
+
+func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1}
+}
+
+func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return ""
+}
+
+func (x *WindowsBasedSli_MetricRange) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_service_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68,
+ 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21,
+ 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00,
+ 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f,
+ 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49,
+ 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69,
+ 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e,
+ 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f,
+ 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b,
+ 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67,
+ 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
+ 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61,
+ 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12,
+ 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a,
+ 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70,
+ 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c,
+ 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a,
+ 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08,
+ 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69,
+ 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b,
+ 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11,
+ 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63,
+ 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12,
+ 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70,
+ 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f,
+ 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22,
+ 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21,
+ 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c,
+ 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a,
+ 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12,
+ 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c,
+ 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67,
+ 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12,
+ 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f,
+ 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f,
+ 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e,
+ 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c,
+ 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75,
+ 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73,
+ 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77,
+ 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02,
+ 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca,
+ 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60,
+ 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f,
+ 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d,
+ 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65,
+ 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53,
+ 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c,
+ 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f,
+ 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69,
+ 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12,
+ 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
+ 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63,
+ 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69,
+ 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65,
+ 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63,
+ 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74,
+ 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f,
+ 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12,
+ 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74,
+ 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f,
+ 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03,
+ 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f,
+ 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f,
+ 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69,
+ 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52,
+ 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74,
+ 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54,
+ 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e,
+ 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f,
+ 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c,
+ 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75,
+ 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75,
+ 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f,
+ 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f,
+ 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64,
+ 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64,
+ 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74,
+ 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69,
+ 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f,
+ 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f,
+ 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49,
+ 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65,
+ 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73,
+ 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e,
+ 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69,
+ 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64,
+ 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a,
+ 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33,
+ 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
+var file_google_monitoring_v3_service_proto_goTypes = []any{
+ (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View
+ (*Service)(nil), // 1: google.monitoring.v3.Service
+ (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective
+ (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator
+ (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli
+ (*Range)(nil), // 5: google.monitoring.v3.Range
+ (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli
+ (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio
+ (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut
+ (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli
+ (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom
+ (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine
+ (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints
+ (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio
+ (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio
+ (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService
+ (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun
+ (*Service_GkeNamespace)(nil), // 17: google.monitoring.v3.Service.GkeNamespace
+ (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload
+ (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService
+ (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService
+ (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry
+ nil, // 22: google.monitoring.v3.Service.UserLabelsEntry
+ nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry
+ nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry
+ (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria
+ (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria
+ (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold
+ (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange
+ (*durationpb.Duration)(nil), // 29: google.protobuf.Duration
+ (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod
+}
+var file_google_monitoring_v3_service_proto_depIdxs = []int32{
+ 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom
+ 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine
+ 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints
+ 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> google.monitoring.v3.Service.ClusterIstio
+ 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio
+ 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService
+ 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun
+ 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace
+ 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload
+ 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService
+ 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService
+ 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry
+ 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry
+ 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator
+ 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration
+ 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod
+ 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry
+ 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli
+ 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli
+ 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli
+ 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria
+ 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria
+ 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio
+ 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut
+ 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range
+ 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold
+ 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange
+ 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange
+ 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration
+ 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry
+ 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration
+ 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli
+ 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli
+ 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range
+ 34, // [34:34] is the sub-list for method output_type
+ 34, // [34:34] is the sub-list for method input_type
+ 34, // [34:34] is the sub-list for extension type_name
+ 34, // [34:34] is the sub-list for extension extendee
+ 0, // [0:34] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_service_proto_init() }
+func file_google_monitoring_v3_service_proto_init() {
+ if File_google_monitoring_v3_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{
+ (*Service_Custom_)(nil),
+ (*Service_AppEngine_)(nil),
+ (*Service_CloudEndpoints_)(nil),
+ (*Service_ClusterIstio_)(nil),
+ (*Service_MeshIstio_)(nil),
+ (*Service_IstioCanonicalService_)(nil),
+ (*Service_CloudRun_)(nil),
+ (*Service_GkeNamespace_)(nil),
+ (*Service_GkeWorkload_)(nil),
+ (*Service_GkeService_)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{
+ (*ServiceLevelObjective_RollingPeriod)(nil),
+ (*ServiceLevelObjective_CalendarPeriod)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{
+ (*ServiceLevelIndicator_BasicSli)(nil),
+ (*ServiceLevelIndicator_RequestBased)(nil),
+ (*ServiceLevelIndicator_WindowsBased)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{
+ (*BasicSli_Availability)(nil),
+ (*BasicSli_Latency)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{
+ (*RequestBasedSli_GoodTotalRatio)(nil),
+ (*RequestBasedSli_DistributionCut)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{
+ (*WindowsBasedSli_GoodBadMetricFilter)(nil),
+ (*WindowsBasedSli_GoodTotalRatioThreshold)(nil),
+ (*WindowsBasedSli_MetricMeanInRange)(nil),
+ (*WindowsBasedSli_MetricSumInRange)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{
+ (*WindowsBasedSli_PerformanceThreshold_Performance)(nil),
+ (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 28,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_service_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_service_proto = out.File
+ file_google_monitoring_v3_service_proto_rawDesc = nil
+ file_google_monitoring_v3_service_proto_goTypes = nil
+ file_google_monitoring_v3_service_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
new file mode 100644
index 000000000..08c2e08e2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
@@ -0,0 +1,1626 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/service_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `CreateService` request.
+type CreateServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource
+ // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the
+ // parent Metrics Scope. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The Service id to use for this Service. If omitted, an id will be
+ // generated instead. Must match the pattern `[a-z0-9\-]+`
+ ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ // Required. The `Service` to create.
+ Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *CreateServiceRequest) Reset() {
+ *x = CreateServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateServiceRequest) ProtoMessage() {}
+
+func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead.
+func (*CreateServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateServiceRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateServiceRequest) GetServiceId() string {
+ if x != nil {
+ return x.ServiceId
+ }
+ return ""
+}
+
+func (x *CreateServiceRequest) GetService() *Service {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+// The `GetService` request.
+type GetServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `Service`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetServiceRequest) Reset() {
+ *x = GetServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetServiceRequest) ProtoMessage() {}
+
+func (x *GetServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead.
+func (*GetServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetServiceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListServices` request.
+type ListServicesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent containing the listed services,
+ // either a [project](https://cloud.google.com/monitoring/api/v3#project_name)
+ // or a Monitoring Metrics Scope. The formats are:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ // workspaces/[HOST_PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // A filter specifying what `Service`s to return. The filter supports
+ // filtering on a particular service-identifier type or one of its attributes.
+ //
+ // To filter on a particular service-identifier type, the `identifier_case`
+ // refers to which option in the `identifier` field is populated. For example,
+ // the filter `identifier_case = "CUSTOM"` would match all services with a
+ // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE",
+ // "MESH_ISTIO", and the other options listed at
+ // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
+ //
+ // To filter on an attribute of a service-identifier type, apply the filter
+ // name by using the snake case of the service-identifier type and the
+ // attribute of that service-identifier type, and join the two with a period.
+ // For example, to filter by the `meshUid` field of the `MeshIstio`
+ // service-identifier type, you must filter on `mesh_istio.mesh_uid =
+ // "123"` to match all services with mesh UID "123". Service-identifier types
+ // and their attributes are described at
+ // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A non-negative number that is the maximum number of results to return.
+ // When 0, use default page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListServicesRequest) Reset() {
+ *x = ListServicesRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesRequest) ProtoMessage() {}
+
+func (x *ListServicesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead.
+func (*ListServicesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListServicesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListServicesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListServicesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListServicesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListServices` response.
+type ListServicesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `Service`s matching the specified filter.
+ Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListServicesResponse) Reset() {
+ *x = ListServicesResponse{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesResponse) ProtoMessage() {}
+
+func (x *ListServicesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead.
+func (*ListServicesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListServicesResponse) GetServices() []*Service {
+ if x != nil {
+ return x.Services
+ }
+ return nil
+}
+
+func (x *ListServicesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `UpdateService` request.
+type UpdateServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `Service` to draw updates from.
+ // The given `name` specifies the resource to update.
+ Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ // A set of field paths defining which fields to use for the update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateServiceRequest) Reset() {
+ *x = UpdateServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateServiceRequest) ProtoMessage() {}
+
+func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead.
+func (*UpdateServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateServiceRequest) GetService() *Service {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// The `DeleteService` request.
+type DeleteServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `Service` to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteServiceRequest) Reset() {
+ *x = DeleteServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteServiceRequest) ProtoMessage() {}
+
+func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead.
+func (*DeleteServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteServiceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateServiceLevelObjective` request.
+type CreateServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent `Service`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The ServiceLevelObjective id to use for this
+ // ServiceLevelObjective. If omitted, an id will be generated instead. Must
+ // match the pattern `^[a-zA-Z0-9-_:.]+$`
+ ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"`
+ // Required. The `ServiceLevelObjective` to create.
+ // The provided `name` will be respected if no `ServiceLevelObjective` exists
+ // with this name.
+ ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"`
+}
+
+func (x *CreateServiceLevelObjectiveRequest) Reset() {
+ *x = CreateServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string {
+ if x != nil {
+ return x.ServiceLevelObjectiveId
+ }
+ return ""
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjective
+ }
+ return nil
+}
+
+// The `GetServiceLevelObjective` request.
+type GetServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `ServiceLevelObjective` to get. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the
+ // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the
+ // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the
+ // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed.
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"`
+}
+
+func (x *GetServiceLevelObjectiveRequest) Reset() {
+ *x = GetServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetServiceLevelObjectiveRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View {
+ if x != nil {
+ return x.View
+ }
+ return ServiceLevelObjective_VIEW_UNSPECIFIED
+}
+
+// The `ListServiceLevelObjectives` request.
+type ListServiceLevelObjectivesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent containing the listed SLOs, either a
+ // project or a Monitoring Metrics Scope. The formats are:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // A filter specifying what `ServiceLevelObjective`s to return.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A non-negative number that is the maximum number of results to return.
+ // When 0, use default page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each
+ // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the
+ // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the
+ // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed.
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"`
+}
+
+func (x *ListServiceLevelObjectivesRequest) Reset() {
+ *x = ListServiceLevelObjectivesRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceLevelObjectivesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceLevelObjectivesRequest) ProtoMessage() {}
+
+func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead.
+func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View {
+ if x != nil {
+ return x.View
+ }
+ return ServiceLevelObjective_VIEW_UNSPECIFIED
+}
+
+// The `ListServiceLevelObjectives` response.
+type ListServiceLevelObjectivesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `ServiceLevelObjective`s matching the specified filter.
+ ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListServiceLevelObjectivesResponse) Reset() {
+ *x = ListServiceLevelObjectivesResponse{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceLevelObjectivesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceLevelObjectivesResponse) ProtoMessage() {}
+
+func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjectives
+ }
+ return nil
+}
+
+func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `UpdateServiceLevelObjective` request.
+type UpdateServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `ServiceLevelObjective` to draw updates from.
+ // The given `name` specifies the resource to update.
+ ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"`
+ // A set of field paths defining which fields to use for the update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) Reset() {
+ *x = UpdateServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjective
+ }
+ return nil
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// The `DeleteServiceLevelObjective` request.
+type DeleteServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `ServiceLevelObjective` to delete. The
+ // format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) Reset() {
+ *x = DeleteServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
+ 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49,
+ 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f,
+ 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04,
+ 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69,
+ 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23,
+ 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52,
+ 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
+ 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a,
+ 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a,
+ 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0xda, 0x41, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x32,
+ 0x21, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0xda, 0x41, 0x1e, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x4d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0xc1,
+ 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22,
+ 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x22, 0x85, 0x01, 0xda, 0x41, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x65, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x32, 0x4a, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01,
+ 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01,
+ 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd8, 0x01, 0x0a, 0x18, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_service_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_service_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_google_monitoring_v3_service_service_proto_goTypes = []any{
+ (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest
+ (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest
+ (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest
+ (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse
+ (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest
+ (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest
+ (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest
+ (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest
+ (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest
+ (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse
+ (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest
+ (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest
+ (*Service)(nil), // 12: google.monitoring.v3.Service
+ (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask
+ (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective
+ (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View
+ (*emptypb.Empty)(nil), // 16: google.protobuf.Empty
+}
+var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{
+ 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service
+ 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service
+ 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service
+ 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View
+ 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View
+ 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> google.monitoring.v3.CreateServiceRequest
+ 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest
+ 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest
+ 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest
+ 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest
+ 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest
+ 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest
+ 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest
+ 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest
+ 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest
+ 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service
+ 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service
+ 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse
+ 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service
+ 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty
+ 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse
+ 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty
+ 20, // [20:30] is the sub-list for method output_type
+ 10, // [10:20] is the sub-list for method input_type
+ 10, // [10:10] is the sub-list for extension type_name
+ 10, // [10:10] is the sub-list for extension extendee
+ 0, // [0:10] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_service_service_proto_init() }
+func file_google_monitoring_v3_service_service_proto_init() {
+ if File_google_monitoring_v3_service_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_service_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_service_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_service_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_service_service_proto = out.File
+ file_google_monitoring_v3_service_service_proto_rawDesc = nil
+ file_google_monitoring_v3_service_service_proto_goTypes = nil
+ file_google_monitoring_v3_service_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ServiceMonitoringServiceClient interface {
+ // Create a `Service`.
+ CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // Get the named `Service`.
+ GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // List `Service`s for this Metrics Scope.
+ ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error)
+ // Update this `Service`.
+ UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // Soft delete this `Service`.
+ DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Create a `ServiceLevelObjective` for the given `Service`.
+ CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // Get a `ServiceLevelObjective` by name.
+ GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // List the `ServiceLevelObjective`s for the given `Service`.
+ ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error)
+ // Update the given `ServiceLevelObjective`.
+ UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // Delete the given `ServiceLevelObjective`.
+ DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type serviceMonitoringServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient {
+ return &serviceMonitoringServiceClient{cc}
+}
+
+func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) {
+ out := new(ListServicesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) {
+ out := new(ListServiceLevelObjectivesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service.
+type ServiceMonitoringServiceServer interface {
+ // Create a `Service`.
+ CreateService(context.Context, *CreateServiceRequest) (*Service, error)
+ // Get the named `Service`.
+ GetService(context.Context, *GetServiceRequest) (*Service, error)
+ // List `Service`s for this Metrics Scope.
+ ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error)
+ // Update this `Service`.
+ UpdateService(context.Context, *UpdateServiceRequest) (*Service, error)
+ // Soft delete this `Service`.
+ DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error)
+ // Create a `ServiceLevelObjective` for the given `Service`.
+ CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // Get a `ServiceLevelObjective` by name.
+ GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // List the `ServiceLevelObjective`s for the given `Service`.
+ ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error)
+ // Update the given `ServiceLevelObjective`.
+ UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // Delete the given `ServiceLevelObjective`.
+ DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error)
+}
+
+// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedServiceMonitoringServiceServer struct {
+}
+
+func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented")
+}
+
+func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) {
+ s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv)
+}
+
+func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).GetService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListServicesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListServiceLevelObjectivesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.ServiceMonitoringService",
+ HandlerType: (*ServiceMonitoringServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateService",
+ Handler: _ServiceMonitoringService_CreateService_Handler,
+ },
+ {
+ MethodName: "GetService",
+ Handler: _ServiceMonitoringService_GetService_Handler,
+ },
+ {
+ MethodName: "ListServices",
+ Handler: _ServiceMonitoringService_ListServices_Handler,
+ },
+ {
+ MethodName: "UpdateService",
+ Handler: _ServiceMonitoringService_UpdateService_Handler,
+ },
+ {
+ MethodName: "DeleteService",
+ Handler: _ServiceMonitoringService_DeleteService_Handler,
+ },
+ {
+ MethodName: "CreateServiceLevelObjective",
+ Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "GetServiceLevelObjective",
+ Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "ListServiceLevelObjectives",
+ Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler,
+ },
+ {
+ MethodName: "UpdateServiceLevelObjective",
+ Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "DeleteServiceLevelObjective",
+ Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/service_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
new file mode 100644
index 000000000..861e045f2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
@@ -0,0 +1,310 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/snooze.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A `Snooze` will prevent any alerts from being opened, and close any that
+// are already open. The `Snooze` will work on alerts that match the
+// criteria defined in the `Snooze`. The `Snooze` will be active from
+// `interval.start_time` through `interval.end_time`.
+type Snooze struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Identifier. The name of the `Snooze`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
+ //
+ // The ID of the `Snooze` will be generated by the system.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. This defines the criteria for applying the `Snooze`. See
+ // `Criteria` for more information.
+ Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"`
+ // Required. The `Snooze` will be active from `interval.start_time` through
+ // `interval.end_time`.
+ // `interval.start_time` cannot be in the past. There is a 15 second clock
+ // skew to account for the time it takes for a request to reach the API from
+ // the UI.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // Required. A display name for the `Snooze`. This can be, at most, 512
+ // unicode characters.
+ DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+}
+
+func (x *Snooze) Reset() {
+ *x = Snooze{}
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Snooze) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Snooze) ProtoMessage() {}
+
+func (x *Snooze) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Snooze.ProtoReflect.Descriptor instead.
+func (*Snooze) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Snooze) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Snooze) GetCriteria() *Snooze_Criteria {
+ if x != nil {
+ return x.Criteria
+ }
+ return nil
+}
+
+func (x *Snooze) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *Snooze) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The
+// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s
+// whose names are supplied.
+type Snooze_Criteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The specific `AlertPolicy` names for the alert that should be snoozed.
+ // The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]
+ //
+ // There is a limit of 16 policies per snooze. This limit is checked during
+ // snooze creation.
+ // Exactly 1 alert policy is required if `filter` is specified at the same
+ // time.
+ Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
+ // Optional. The filter string to match on Alert fields when silencing the
+ // alerts. It follows the standard https://google.aip.dev/160 syntax.
+ // A filter string used to apply the snooze to specific incidents
+ // that have matching filter values.
+ // Filters can be defined for snoozes that apply to one alerting
+ // policy.
+ // Filters must be a string formatted as one or more resource labels with
+ // specific label values. If multiple resource labels are used, then they
+ // must be connected with an AND operator. For example, the following filter
+ // applies the snooze to incidents that have an instance ID of
+ // `1234567890` and a zone of `us-central1-a`:
+ //
+ // resource.labels.instance_id="1234567890" AND
+ // resource.labels.zone="us-central1-a"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+}
+
+func (x *Snooze_Criteria) Reset() {
+ *x = Snooze_Criteria{}
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Snooze_Criteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Snooze_Criteria) ProtoMessage() {}
+
+func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead.
+func (*Snooze_Criteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Snooze_Criteria) GetPolicies() []string {
+ if x != nil {
+ return x.Policies
+ }
+ return nil
+}
+
+func (x *Snooze_Criteria) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x06, 0x53, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08,
+ 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69,
+ 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74,
+ 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73,
+ 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
+ 0x65, 0x1a, 0x6a, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a,
+ 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42,
+ 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x4a, 0xea,
+ 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73,
+ 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc
+)
+
+func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_snooze_proto_rawDescData
+}
+
+var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_monitoring_v3_snooze_proto_goTypes = []any{
+ (*Snooze)(nil), // 0: google.monitoring.v3.Snooze
+ (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria
+ (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval
+}
+var file_google_monitoring_v3_snooze_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria
+ 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_snooze_proto_init() }
+func file_google_monitoring_v3_snooze_proto_init() {
+ if File_google_monitoring_v3_snooze_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_snooze_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_snooze_proto = out.File
+ file_google_monitoring_v3_snooze_proto_rawDesc = nil
+ file_google_monitoring_v3_snooze_proto_goTypes = nil
+ file_google_monitoring_v3_snooze_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
new file mode 100644
index 000000000..c562d60bc
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
@@ -0,0 +1,793 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/snooze_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The message definition for creating a `Snooze`. Users must provide the body
+// of the `Snooze` to be created but must omit the `Snooze` field, `name`.
+type CreateSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // a `Snooze` should be created. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The `Snooze` to create. Omit the `name` field, as it will be
+ // filled in by the API.
+ Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"`
+}
+
+func (x *CreateSnoozeRequest) Reset() {
+ *x = CreateSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateSnoozeRequest) ProtoMessage() {}
+
+func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateSnoozeRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateSnoozeRequest) GetSnooze() *Snooze {
+ if x != nil {
+ return x.Snooze
+ }
+ return nil
+}
+
+// The message definition for listing `Snooze`s associated with the given
+// `parent`, satisfying the optional `filter`.
+type ListSnoozesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // `Snooze`s should be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. Optional filter to restrict results to the given criteria. The
+ // following fields are supported.
+ //
+ // - `interval.start_time`
+ // - `interval.end_time`
+ //
+ // For example:
+ //
+ // interval.start_time > "2022-03-11T00:00:00-08:00" AND
+ // interval.end_time < "2022-03-12T00:00:00-08:00"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. The maximum number of results to return for a single query. The
+ // server may further constrain the maximum number of results returned in a
+ // single page. The value should be in the range [1, 1000]. If the value given
+ // is outside this range, the server will decide the number of results to be
+ // returned.
+ PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. The `next_page_token` from a previous call to
+ // `ListSnoozesRequest` to get the next page of results.
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListSnoozesRequest) Reset() {
+ *x = ListSnoozesRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSnoozesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSnoozesRequest) ProtoMessage() {}
+
+func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead.
+func (*ListSnoozesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListSnoozesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListSnoozesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListSnoozesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListSnoozesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The results of a successful `ListSnoozes` call, containing the matching
+// `Snooze`s.
+type ListSnoozesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Snooze`s matching this list call.
+ Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"`
+ // Page token for repeated calls to `ListSnoozes`, to fetch additional pages
+ // of results. If this is empty or missing, there are no more pages.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListSnoozesResponse) Reset() {
+ *x = ListSnoozesResponse{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSnoozesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSnoozesResponse) ProtoMessage() {}
+
+func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead.
+func (*ListSnoozesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListSnoozesResponse) GetSnoozes() []*Snooze {
+ if x != nil {
+ return x.Snoozes
+ }
+ return nil
+}
+
+func (x *ListSnoozesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The message definition for retrieving a `Snooze`. Users must specify the
+// field, `name`, which identifies the `Snooze`.
+type GetSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The ID of the `Snooze` to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetSnoozeRequest) Reset() {
+ *x = GetSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSnoozeRequest) ProtoMessage() {}
+
+func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*GetSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *GetSnoozeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The message definition for updating a `Snooze`. The field, `snooze.name`
+// identifies the `Snooze` to be updated. The remainder of `snooze` gives the
+// content the `Snooze` in question will be assigned.
+//
+// What fields can be updated depends on the start time and end time of the
+// `Snooze`.
+//
+// - end time is in the past: These `Snooze`s are considered
+// read-only and cannot be updated.
+// - start time is in the past and end time is in the future: `display_name`
+// and `interval.end_time` can be updated.
+// - start time is in the future: `display_name`, `interval.start_time` and
+// `interval.end_time` can be updated.
+type UpdateSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `Snooze` to update. Must have the name field present.
+ Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"`
+ // Required. The fields to update.
+ //
+ // For each field listed in `update_mask`:
+ //
+ // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a
+ // value for that field, the value of the field in the existing `Snooze`
+ // will be set to the value of the field in the supplied `Snooze`.
+ // - If the field does not have a value in the supplied `Snooze`, the field
+ // in the existing `Snooze` is set to its default value.
+ //
+ // Fields not listed retain their existing value.
+ //
+ // The following are the field names that are accepted in `update_mask`:
+ //
+ // - `display_name`
+ // - `interval.start_time`
+ // - `interval.end_time`
+ //
+ // That said, the start time and end time of the `Snooze` determines which
+ // fields can legally be updated. Before attempting an update, users should
+ // consult the documentation for `UpdateSnoozeRequest`, which talks about
+ // which fields can be updated.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateSnoozeRequest) Reset() {
+ *x = UpdateSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateSnoozeRequest) ProtoMessage() {}
+
+func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateSnoozeRequest) GetSnooze() *Snooze {
+ if x != nil {
+ return x.Snooze
+ }
+ return nil
+}
+
+func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9,
+ 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
+ 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
+ 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
+ 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
+ 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a,
+ 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
+ 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12,
+ 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32,
+ 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
+ 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
+ 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72,
+ 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_snooze_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{
+ (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest
+ (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest
+ (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse
+ (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest
+ (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest
+ (*Snooze)(nil), // 5: google.monitoring.v3.Snooze
+ (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask
+}
+var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
+ 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze
+ 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
+ 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 0, // 4: google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest
+ 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest
+ 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest
+ 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest
+ 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze
+ 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse
+ 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze
+ 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze
+ 8, // [8:12] is the sub-list for method output_type
+ 4, // [4:8] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_snooze_service_proto_init() }
+func file_google_monitoring_v3_snooze_service_proto_init() {
+ if File_google_monitoring_v3_snooze_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_snooze_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_snooze_service_proto = out.File
+ file_google_monitoring_v3_snooze_service_proto_rawDesc = nil
+ file_google_monitoring_v3_snooze_service_proto_goTypes = nil
+ file_google_monitoring_v3_snooze_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SnoozeServiceClient is the client API for SnoozeService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SnoozeServiceClient interface {
+ // Creates a `Snooze` that will prevent alerts, which match the provided
+ // criteria, from being opened. The `Snooze` applies for a specific time
+ // interval.
+ CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+ // Lists the `Snooze`s associated with a project. Can optionally pass in
+ // `filter`, which specifies predicates to match `Snooze`s.
+ ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error)
+ // Retrieves a `Snooze` by `name`.
+ GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+ // Updates a `Snooze`, identified by its `name`, with the parameters in the
+ // given `Snooze` object.
+ UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+}
+
+type snoozeServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient {
+ return &snoozeServiceClient{cc}
+}
+
+func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) {
+ out := new(ListSnoozesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SnoozeServiceServer is the server API for SnoozeService service.
+type SnoozeServiceServer interface {
+ // Creates a `Snooze` that will prevent alerts, which match the provided
+ // criteria, from being opened. The `Snooze` applies for a specific time
+ // interval.
+ CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error)
+ // Lists the `Snooze`s associated with a project. Can optionally pass in
+ // `filter`, which specifies predicates to match `Snooze`s.
+ ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error)
+ // Retrieves a `Snooze` by `name`.
+ GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error)
+ // Updates a `Snooze`, identified by its `name`, with the parameters in the
+ // given `Snooze` object.
+ UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error)
+}
+
+// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedSnoozeServiceServer struct {
+}
+
+func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented")
+}
+
+func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) {
+ s.RegisterService(&_SnoozeService_serviceDesc, srv)
+}
+
+func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).CreateSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListSnoozesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).ListSnoozes(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).GetSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _SnoozeService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.SnoozeService",
+ HandlerType: (*SnoozeServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateSnooze",
+ Handler: _SnoozeService_CreateSnooze_Handler,
+ },
+ {
+ MethodName: "ListSnoozes",
+ Handler: _SnoozeService_ListSnoozes_Handler,
+ },
+ {
+ MethodName: "GetSnooze",
+ Handler: _SnoozeService_GetSnooze_Handler,
+ },
+ {
+ MethodName: "UpdateSnooze",
+ Handler: _SnoozeService_UpdateSnooze_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/snooze_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
new file mode 100644
index 000000000..23f42835f
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
@@ -0,0 +1,172 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/span_context.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The context of a span. This is attached to an
+// [Exemplar][google.api.Distribution.Exemplar]
+// in [Distribution][google.api.Distribution] values during aggregation.
+//
+// It contains the name of a span with format:
+//
+// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
+type SpanContext struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource name of the span. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
+ //
+ // `[TRACE_ID]` is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array.
+ //
+ // `[SPAN_ID]` is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array.
+ SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"`
+}
+
+func (x *SpanContext) Reset() {
+ *x = SpanContext{}
+ mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SpanContext) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SpanContext) ProtoMessage() {}
+
+func (x *SpanContext) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead.
+func (*SpanContext) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SpanContext) GetSpanName() string {
+ if x != nil {
+ return x.SpanName
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22,
+ 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc
+)
+
+func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_span_context_proto_rawDescData
+}
+
+var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_span_context_proto_goTypes = []any{
+ (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext
+}
+var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_span_context_proto_init() }
+func file_google_monitoring_v3_span_context_proto_init() {
+ if File_google_monitoring_v3_span_context_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_span_context_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_span_context_proto = out.File
+ file_google_monitoring_v3_span_context_proto_rawDesc = nil
+ file_google_monitoring_v3_span_context_proto_goTypes = nil
+ file_google_monitoring_v3_span_context_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
new file mode 100644
index 000000000..f303ac251
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
@@ -0,0 +1,2531 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/uptime.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The regions from which an Uptime check can be run.
+type UptimeCheckRegion int32
+
+const (
+ // Default value if no region is specified. Will result in Uptime checks
+ // running from all regions.
+ UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0
+ // Allows checks to run from locations within the United States of America.
+ UptimeCheckRegion_USA UptimeCheckRegion = 1
+ // Allows checks to run from locations within the continent of Europe.
+ UptimeCheckRegion_EUROPE UptimeCheckRegion = 2
+ // Allows checks to run from locations within the continent of South
+ // America.
+ UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3
+ // Allows checks to run from locations within the Asia Pacific area (ex:
+ // Singapore).
+ UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4
+ // Allows checks to run from locations within the western United States of
+ // America
+ UptimeCheckRegion_USA_OREGON UptimeCheckRegion = 5
+ // Allows checks to run from locations within the central United States of
+ // America
+ UptimeCheckRegion_USA_IOWA UptimeCheckRegion = 6
+ // Allows checks to run from locations within the eastern United States of
+ // America
+ UptimeCheckRegion_USA_VIRGINIA UptimeCheckRegion = 7
+)
+
+// Enum value maps for UptimeCheckRegion.
+var (
+ UptimeCheckRegion_name = map[int32]string{
+ 0: "REGION_UNSPECIFIED",
+ 1: "USA",
+ 2: "EUROPE",
+ 3: "SOUTH_AMERICA",
+ 4: "ASIA_PACIFIC",
+ 5: "USA_OREGON",
+ 6: "USA_IOWA",
+ 7: "USA_VIRGINIA",
+ }
+ UptimeCheckRegion_value = map[string]int32{
+ "REGION_UNSPECIFIED": 0,
+ "USA": 1,
+ "EUROPE": 2,
+ "SOUTH_AMERICA": 3,
+ "ASIA_PACIFIC": 4,
+ "USA_OREGON": 5,
+ "USA_IOWA": 6,
+ "USA_VIRGINIA": 7,
+ }
+)
+
+func (x UptimeCheckRegion) Enum() *UptimeCheckRegion {
+ p := new(UptimeCheckRegion)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckRegion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor()
+}
+
+func (UptimeCheckRegion) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[0]
+}
+
+func (x UptimeCheckRegion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckRegion.Descriptor instead.
+func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0}
+}
+
+// The supported resource types that can be used as values of
+// `group_resource.resource_type`.
+// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types.
+// The resource types `gae_app` and `uptime_url` are not valid here because
+// group checks on App Engine modules and URLs are not allowed.
+type GroupResourceType int32
+
+const (
+ // Default value (not valid).
+ GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0
+ // A group of instances from Google Cloud Platform (GCP) or
+ // Amazon Web Services (AWS).
+ GroupResourceType_INSTANCE GroupResourceType = 1
+ // A group of Amazon ELB load balancers.
+ GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2
+)
+
+// Enum value maps for GroupResourceType.
+var (
+ GroupResourceType_name = map[int32]string{
+ 0: "RESOURCE_TYPE_UNSPECIFIED",
+ 1: "INSTANCE",
+ 2: "AWS_ELB_LOAD_BALANCER",
+ }
+ GroupResourceType_value = map[string]int32{
+ "RESOURCE_TYPE_UNSPECIFIED": 0,
+ "INSTANCE": 1,
+ "AWS_ELB_LOAD_BALANCER": 2,
+ }
+)
+
+func (x GroupResourceType) Enum() *GroupResourceType {
+ p := new(GroupResourceType)
+ *p = x
+ return p
+}
+
+func (x GroupResourceType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor()
+}
+
+func (GroupResourceType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[1]
+}
+
+func (x GroupResourceType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GroupResourceType.Descriptor instead.
+func (GroupResourceType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1}
+}
+
+// Operational states for an internal checker.
+type InternalChecker_State int32
+
+const (
+ // An internal checker should never be in the unspecified state.
+ InternalChecker_UNSPECIFIED InternalChecker_State = 0
+ // The checker is being created, provisioned, and configured. A checker in
+ // this state can be returned by `ListInternalCheckers` or
+ // `GetInternalChecker`, as well as by examining the [long running
+ // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations)
+ // that created it.
+ InternalChecker_CREATING InternalChecker_State = 1
+ // The checker is running and available for use. A checker in this state
+ // can be returned by `ListInternalCheckers` or `GetInternalChecker` as
+ // well as by examining the [long running
+ // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations)
+ // that created it.
+ // If a checker is being torn down, it is neither visible nor usable, so
+ // there is no "deleting" or "down" state.
+ InternalChecker_RUNNING InternalChecker_State = 2
+)
+
+// Enum value maps for InternalChecker_State.
+var (
+ InternalChecker_State_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "CREATING",
+ 2: "RUNNING",
+ }
+ InternalChecker_State_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "CREATING": 1,
+ "RUNNING": 2,
+ }
+)
+
+func (x InternalChecker_State) Enum() *InternalChecker_State {
+ p := new(InternalChecker_State)
+ *p = x
+ return p
+}
+
+func (x InternalChecker_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor()
+}
+
+func (InternalChecker_State) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[2]
+}
+
+func (x InternalChecker_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use InternalChecker_State.Descriptor instead.
+func (InternalChecker_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// What kind of checkers are available to be used by the check.
+type UptimeCheckConfig_CheckerType int32
+
+const (
+ // The default checker type. Currently converted to `STATIC_IP_CHECKERS`
+ // on creation, the default conversion behavior may change in the future.
+ UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED UptimeCheckConfig_CheckerType = 0
+ // `STATIC_IP_CHECKERS` are used for uptime checks that perform egress
+ // across the public internet. `STATIC_IP_CHECKERS` use the static IP
+ // addresses returned by `ListUptimeCheckIps`.
+ UptimeCheckConfig_STATIC_IP_CHECKERS UptimeCheckConfig_CheckerType = 1
+ // `VPC_CHECKERS` are used for uptime checks that perform egress using
+ // Service Directory and private network access. When using `VPC_CHECKERS`,
+ // the monitored resource type must be `servicedirectory_service`.
+ UptimeCheckConfig_VPC_CHECKERS UptimeCheckConfig_CheckerType = 3
+)
+
+// Enum value maps for UptimeCheckConfig_CheckerType.
+var (
+ UptimeCheckConfig_CheckerType_name = map[int32]string{
+ 0: "CHECKER_TYPE_UNSPECIFIED",
+ 1: "STATIC_IP_CHECKERS",
+ 3: "VPC_CHECKERS",
+ }
+ UptimeCheckConfig_CheckerType_value = map[string]int32{
+ "CHECKER_TYPE_UNSPECIFIED": 0,
+ "STATIC_IP_CHECKERS": 1,
+ "VPC_CHECKERS": 3,
+ }
+)
+
+func (x UptimeCheckConfig_CheckerType) Enum() *UptimeCheckConfig_CheckerType {
+ p := new(UptimeCheckConfig_CheckerType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_CheckerType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_CheckerType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor()
+}
+
+func (UptimeCheckConfig_CheckerType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[3]
+}
+
+func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead.
+func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// The HTTP request method options.
+type UptimeCheckConfig_HttpCheck_RequestMethod int32
+
+const (
+ // No request method specified.
+ UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0
+ // GET request.
+ UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1
+ // POST request.
+ UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod.
+var (
+ UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{
+ 0: "METHOD_UNSPECIFIED",
+ 1: "GET",
+ 2: "POST",
+ }
+ UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{
+ "METHOD_UNSPECIFIED": 0,
+ "GET": 1,
+ "POST": 2,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod {
+ p := new(UptimeCheckConfig_HttpCheck_RequestMethod)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[4]
+}
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0}
+}
+
+// Header options corresponding to the content type of a HTTP request body.
+type UptimeCheckConfig_HttpCheck_ContentType int32
+
+const (
+ // No content type specified.
+ UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0
+ // `body` is in URL-encoded form. Equivalent to setting the `Content-Type`
+ // to `application/x-www-form-urlencoded` in the HTTP request.
+ UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1
+ // `body` is in `custom_content_type` form. Equivalent to setting the
+ // `Content-Type` to the contents of `custom_content_type` in the HTTP
+ // request.
+ UptimeCheckConfig_HttpCheck_USER_PROVIDED UptimeCheckConfig_HttpCheck_ContentType = 2
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType.
+var (
+ UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "URL_ENCODED",
+ 2: "USER_PROVIDED",
+ }
+ UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "URL_ENCODED": 1,
+ "USER_PROVIDED": 2,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType {
+ p := new(UptimeCheckConfig_HttpCheck_ContentType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[5]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1}
+}
+
+// An HTTP status code class.
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass int32
+
+const (
+ // Default value that matches no status codes.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 0
+ // The class of status codes between 100 and 199.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_1XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 100
+ // The class of status codes between 200 and 299.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 200
+ // The class of status codes between 300 and 399.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_3XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 300
+ // The class of status codes between 400 and 499.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_4XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 400
+ // The class of status codes between 500 and 599.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_5XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 500
+ // The class of all status codes.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_ANY UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 1000
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.
+var (
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_name = map[int32]string{
+ 0: "STATUS_CLASS_UNSPECIFIED",
+ 100: "STATUS_CLASS_1XX",
+ 200: "STATUS_CLASS_2XX",
+ 300: "STATUS_CLASS_3XX",
+ 400: "STATUS_CLASS_4XX",
+ 500: "STATUS_CLASS_5XX",
+ 1000: "STATUS_CLASS_ANY",
+ }
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_value = map[string]int32{
+ "STATUS_CLASS_UNSPECIFIED": 0,
+ "STATUS_CLASS_1XX": 100,
+ "STATUS_CLASS_2XX": 200,
+ "STATUS_CLASS_3XX": 300,
+ "STATUS_CLASS_4XX": 400,
+ "STATUS_CLASS_5XX": 500,
+ "STATUS_CLASS_ANY": 1000,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Enum() *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass {
+ p := new(UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[6].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[6]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0}
+}
+
+// Type of authentication.
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32
+
+const (
+ // Default value, will result in OIDC Authentication.
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0
+ // OIDC Authentication
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.
+var (
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{
+ 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED",
+ 1: "OIDC_TOKEN",
+ }
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{
+ "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0,
+ "OIDC_TOKEN": 1,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType {
+ p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[7]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0}
+}
+
+// Options to perform content matching.
+type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32
+
+const (
+ // No content matcher type specified (maintained for backward
+ // compatibility, but deprecated for future use).
+ // Treated as `CONTAINS_STRING`.
+ UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0
+ // Selects substring matching. The match succeeds if the output contains
+ // the `content` string. This is the default value for checks without
+ // a `matcher` option, or where the value of `matcher` is
+ // `CONTENT_MATCHER_OPTION_UNSPECIFIED`.
+ UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1
+ // Selects negation of substring matching. The match succeeds if the
+ // output does _NOT_ contain the `content` string.
+ UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2
+ // Selects regular-expression matching. The match succeeds if the output
+ // matches the regular expression specified in the `content` string.
+ // Regex matching is only supported for HTTP/HTTPS checks.
+ UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3
+ // Selects negation of regular-expression matching. The match succeeds if
+ // the output does _NOT_ match the regular expression specified in the
+ // `content` string. Regex matching is only supported for HTTP/HTTPS
+ // checks.
+ UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4
+ // Selects JSONPath matching. See `JsonPathMatcher` for details on when
+ // the match succeeds. JSONPath matching is only supported for HTTP/HTTPS
+ // checks.
+ UptimeCheckConfig_ContentMatcher_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 5
+ // Selects JSONPath matching. See `JsonPathMatcher` for details on when
+ // the match succeeds. Succeeds when output does _NOT_ match as specified.
+ // JSONPath is only supported for HTTP/HTTPS checks.
+ UptimeCheckConfig_ContentMatcher_NOT_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 6
+)
+
+// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption.
+var (
+ UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{
+ 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED",
+ 1: "CONTAINS_STRING",
+ 2: "NOT_CONTAINS_STRING",
+ 3: "MATCHES_REGEX",
+ 4: "NOT_MATCHES_REGEX",
+ 5: "MATCHES_JSON_PATH",
+ 6: "NOT_MATCHES_JSON_PATH",
+ }
+ UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{
+ "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0,
+ "CONTAINS_STRING": 1,
+ "NOT_CONTAINS_STRING": 2,
+ "MATCHES_REGEX": 3,
+ "NOT_MATCHES_REGEX": 4,
+ "MATCHES_JSON_PATH": 5,
+ "NOT_MATCHES_JSON_PATH": 6,
+ }
+)
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption {
+ p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor()
+}
+
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[8]
+}
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead.
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0}
+}
+
+// Options to perform JSONPath content matching.
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption int32
+
+const (
+ // No JSONPath matcher type specified (not valid).
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 0
+ // Selects 'exact string' matching. The match succeeds if the content at
+ // the `json_path` within the output is exactly the same as the
+ // `content` string.
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 1
+ // Selects regular-expression matching. The match succeeds if the
+ // content at the `json_path` within the output matches the regular
+ // expression specified in the `content` string.
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_REGEX_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 2
+)
+
+// Enum value maps for UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.
+var (
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_name = map[int32]string{
+ 0: "JSON_PATH_MATCHER_OPTION_UNSPECIFIED",
+ 1: "EXACT_MATCH",
+ 2: "REGEX_MATCH",
+ }
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_value = map[string]int32{
+ "JSON_PATH_MATCHER_OPTION_UNSPECIFIED": 0,
+ "EXACT_MATCH": 1,
+ "REGEX_MATCH": 2,
+ }
+)
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption {
+ p := new(UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor()
+}
+
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[9]
+}
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead.
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0}
+}
+
+// An internal checker allows Uptime checks to run on private/internal GCP
+// resources.
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+type InternalChecker struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique resource name for this InternalChecker. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID]
+ //
+ // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for
+ // the Uptime check config associated with the internal checker.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The checker's human-readable name. The display name
+ // should be unique within a Cloud Monitoring Metrics Scope in order to make
+ // it easier to identify; however, uniqueness is not enforced.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the
+ // internal resource lives (ex: "default").
+ Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"`
+ // The GCP zone the Uptime check should egress from. Only respected for
+ // internal Uptime checks, where internal_network is specified.
+ GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"`
+ // The GCP project ID where the internal checker lives. Not necessary
+ // the same as the Metrics Scope project.
+ PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"`
+ // The current operational state of the internal checker.
+ State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"`
+}
+
+func (x *InternalChecker) Reset() {
+ *x = InternalChecker{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *InternalChecker) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InternalChecker) ProtoMessage() {}
+
+func (x *InternalChecker) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead.
+func (*InternalChecker) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *InternalChecker) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetNetwork() string {
+ if x != nil {
+ return x.Network
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetGcpZone() string {
+ if x != nil {
+ return x.GcpZone
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetPeerProjectId() string {
+ if x != nil {
+ return x.PeerProjectId
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetState() InternalChecker_State {
+ if x != nil {
+ return x.State
+ }
+ return InternalChecker_UNSPECIFIED
+}
+
+// Describes a Synthetic Monitor to be invoked by Uptime.
+type SyntheticMonitorTarget struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies a Synthetic Monitor's execution stack.
+ //
+ // Types that are assignable to Target:
+ //
+ // *SyntheticMonitorTarget_CloudFunctionV2
+ Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"`
+}
+
+func (x *SyntheticMonitorTarget) Reset() {
+ *x = SyntheticMonitorTarget{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SyntheticMonitorTarget) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SyntheticMonitorTarget) ProtoMessage() {}
+
+func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead.
+func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target {
+ if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok {
+ return x.CloudFunctionV2
+ }
+ return nil
+}
+
+type isSyntheticMonitorTarget_Target interface {
+ isSyntheticMonitorTarget_Target()
+}
+
+type SyntheticMonitorTarget_CloudFunctionV2 struct {
+ // Target a Synthetic Monitor GCFv2 instance.
+ CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"`
+}
+
+func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {}
+
+// This message configures which resources and services to monitor for
+// availability.
+type UptimeCheckConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. A unique resource name for this Uptime check configuration. The
+ // format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ //
+ // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the
+ // Uptime check.
+ //
+ // This field should be omitted when creating the Uptime check configuration;
+ // on create, the resource name is assigned by the server and included in the
+ // response.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A human-friendly name for the Uptime check configuration. The display name
+ // should be unique within a Cloud Monitoring Workspace in order to make it
+ // easier to identify; however, uniqueness is not enforced. Required.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The resource the check is checking. Required.
+ //
+ // Types that are assignable to Resource:
+ //
+ // *UptimeCheckConfig_MonitoredResource
+ // *UptimeCheckConfig_ResourceGroup_
+ // *UptimeCheckConfig_SyntheticMonitor
+ Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"`
+ // The type of Uptime check request.
+ //
+ // Types that are assignable to CheckRequestType:
+ //
+ // *UptimeCheckConfig_HttpCheck_
+ // *UptimeCheckConfig_TcpCheck_
+ CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"`
+ // How often, in seconds, the Uptime check is performed.
+ // Currently, the only supported values are `60s` (1 minute), `300s`
+ // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional,
+ // defaults to `60s`.
+ Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"`
+ // The maximum amount of time to wait for the request to complete (must be
+ // between 1 and 60 seconds). Required.
+ Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // The content that is expected to appear in the data returned by the target
+ // server against which the check is run. Currently, only the first entry
+ // in the `content_matchers` list is supported, and additional entries will
+ // be ignored. This field is optional and should only be specified if a
+ // content match is required as part of the/ Uptime check.
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"`
+ // The type of checkers to use to execute the Uptime check.
+ CheckerType UptimeCheckConfig_CheckerType `protobuf:"varint,17,opt,name=checker_type,json=checkerType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_CheckerType" json:"checker_type,omitempty"`
+ // The list of regions from which the check will be run.
+ // Some regions contain one location, and others contain more than one.
+ // If this field is specified, enough regions must be provided to include a
+ // minimum of 3 locations. Not specifying this field will result in Uptime
+ // checks running from all available regions.
+ SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"`
+ // If this is `true`, then checks are made only from the 'internal_checkers'.
+ // If it is `false`, then checks are made only from the 'selected_regions'.
+ // It is an error to provide 'selected_regions' when is_internal is `true`,
+ // or to provide 'internal_checkers' when is_internal is `false`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+ IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"`
+ // The internal checkers that this check will egress from. If `is_internal` is
+ // `true` and this list is empty, the check will egress from all the
+ // InternalCheckers configured for the project that owns this
+ // `UptimeCheckConfig`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+ InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `UptimeCheckConfig` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,20,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *UptimeCheckConfig) Reset() {
+ *x = UptimeCheckConfig{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig) ProtoMessage() {}
+
+func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *UptimeCheckConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok {
+ return x.MonitoredResource
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok {
+ return x.ResourceGroup
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok {
+ return x.SyntheticMonitor
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType {
+ if m != nil {
+ return m.CheckRequestType
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck {
+ if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok {
+ return x.HttpCheck
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck {
+ if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok {
+ return x.TcpCheck
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.Period
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher {
+ if x != nil {
+ return x.ContentMatchers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetCheckerType() UptimeCheckConfig_CheckerType {
+ if x != nil {
+ return x.CheckerType
+ }
+ return UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion {
+ if x != nil {
+ return x.SelectedRegions
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+func (x *UptimeCheckConfig) GetIsInternal() bool {
+ if x != nil {
+ return x.IsInternal
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker {
+ if x != nil {
+ return x.InternalCheckers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_Resource interface {
+ isUptimeCheckConfig_Resource()
+}
+
+type UptimeCheckConfig_MonitoredResource struct {
+ // The [monitored
+ // resource](https://cloud.google.com/monitoring/api/resources) associated
+ // with the configuration.
+ // The following monitored resource types are valid for this field:
+ //
+ // `uptime_url`,
+ // `gce_instance`,
+ // `gae_app`,
+ // `aws_ec2_instance`,
+ // `aws_elb_load_balancer`
+ // `k8s_service`
+ // `servicedirectory_service`
+ // `cloud_run_revision`
+ MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"`
+}
+
+type UptimeCheckConfig_ResourceGroup_ struct {
+ // The group resource associated with the configuration.
+ ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"`
+}
+
+type UptimeCheckConfig_SyntheticMonitor struct {
+ // Specifies a Synthetic Monitor to invoke.
+ SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {}
+
+type isUptimeCheckConfig_CheckRequestType interface {
+ isUptimeCheckConfig_CheckRequestType()
+}
+
+type UptimeCheckConfig_HttpCheck_ struct {
+ // Contains information needed to make an HTTP or HTTPS check.
+ HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"`
+}
+
+type UptimeCheckConfig_TcpCheck_ struct {
+ // Contains information needed to make a TCP check.
+ TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+// Contains the region, location, and list of IP
+// addresses where checkers in the location run from.
+type UptimeCheckIp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A broad region category in which the IP address is located.
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"`
+ // A more specific location within the region that typically encodes
+ // a particular city/town/metro (and its containing state/province or country)
+ // within the broader umbrella region category.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The IP address from which the Uptime check originates. This is a fully
+ // specified IP address (not an IP address range). Most IP addresses, as of
+ // this publication, are in IPv4 format; however, one should not rely on the
+ // IP addresses being in IPv4 format indefinitely, and should support
+ // interpreting this field in either IPv4 or IPv6 format.
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+}
+
+func (x *UptimeCheckIp) Reset() {
+ *x = UptimeCheckIp{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckIp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckIp) ProtoMessage() {}
+
+func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead.
+func (*UptimeCheckIp) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion {
+ if x != nil {
+ return x.Region
+ }
+ return UptimeCheckRegion_REGION_UNSPECIFIED
+}
+
+func (x *UptimeCheckIp) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *UptimeCheckIp) GetIpAddress() string {
+ if x != nil {
+ return x.IpAddress
+ }
+ return ""
+}
+
+// A Synthetic Monitor deployed to a Cloud Functions V2 instance.
+type SyntheticMonitorTarget_CloudFunctionV2Target struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Fully qualified GCFv2 resource name
+ // i.e. `projects/{project}/locations/{location}/functions/{function}`
+ // Required.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Output only. The `cloud_run_revision` Monitored Resource associated with
+ // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and
+ // spans) are reported against this Monitored Resource. This field is output
+ // only.
+ CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"`
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() {
+ *x = SyntheticMonitorTarget_CloudFunctionV2Target{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead.
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource {
+ if x != nil {
+ return x.CloudRunRevision
+ }
+ return nil
+}
+
+// The resource submessage for group checks. It can be used instead of a
+// monitored resource, when multiple resources are being monitored.
+type UptimeCheckConfig_ResourceGroup struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The group of resources being monitored. Should be only the `[GROUP_ID]`,
+ // and not the full-path
+ // `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.
+ GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+ // The resource type of the group members.
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) Reset() {
+ *x = UptimeCheckConfig_ResourceGroup{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
+ if x != nil {
+ return x.GroupId
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
+ if x != nil {
+ return x.ResourceType
+ }
+ return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
+}
+
+// Information involved in sending ICMP pings alongside public HTTP/TCP
+// checks. For HTTP, the pings are performed for each part of the redirect
+// chain.
+type UptimeCheckConfig_PingConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Number of ICMP pings. A maximum of 3 ICMP pings is currently supported.
+ PingsCount int32 `protobuf:"varint,1,opt,name=pings_count,json=pingsCount,proto3" json:"pings_count,omitempty"`
+}
+
+func (x *UptimeCheckConfig_PingConfig) Reset() {
+ *x = UptimeCheckConfig_PingConfig{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_PingConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_PingConfig) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 {
+ if x != nil {
+ return x.PingsCount
+ }
+ return 0
+}
+
+// Information involved in an HTTP/HTTPS Uptime check request.
+type UptimeCheckConfig_HttpCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP request method to use for the check. If set to
+ // `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`.
+ RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"`
+ // If `true`, use HTTPS instead of HTTP to run the check.
+ UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"`
+ // Optional (defaults to "/"). The path to the page against which to run
+ // the check. Will be combined with the `host` (specified within the
+ // `monitored_resource`) and `port` to construct the full URL. If the
+ // provided path does not begin with "/", a "/" will be prepended
+ // automatically.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when
+ // `use_ssl` is `true`). The TCP port on the HTTP server against which to
+ // run the check. Will be combined with host (specified within the
+ // `monitored_resource`) and `path` to construct the full URL.
+ Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"`
+ // The authentication information. Optional when creating an HTTP check;
+ // defaults to empty.
+ // Do not set both `auth_method` and `auth_info`.
+ AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"`
+ // Boolean specifying whether to encrypt the header information.
+ // Encryption should be specified for any headers related to authentication
+ // that you do not wish to be seen when retrieving the configuration. The
+ // server will be responsible for encrypting the headers.
+ // On Get/List calls, if `mask_headers` is set to `true` then the headers
+ // will be obscured with `******.`
+ MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
+ // The list of headers to send as part of the Uptime check request.
+ // If two headers have the same key and different values, they should
+ // be entered as a single header, with the value being a comma-separated
+ // list of all the desired values as described at
+ // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31).
+ // Entering two separate headers with the same key in a Create call will
+ // cause the first to be overwritten by the second.
+ // The maximum number of headers allowed is 100.
+ Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The content type header to use for the check. The following
+ // configurations result in errors:
+ // 1. Content type is specified in both the `headers` field and the
+ // `content_type` field.
+ // 2. Request method is `GET` and `content_type` is not `TYPE_UNSPECIFIED`
+ // 3. Request method is `POST` and `content_type` is `TYPE_UNSPECIFIED`.
+ // 4. Request method is `POST` and a "Content-Type" header is provided via
+ // `headers` field. The `content_type` field should be used instead.
+ ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"`
+ // A user provided content type header to use for the check. The invalid
+ // configurations outlined in the `content_type` field apply to
+ // `custom_content_type`, as well as the following:
+ // 1. `content_type` is `URL_ENCODED` and `custom_content_type` is set.
+ // 2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not
+ // set.
+ CustomContentType string `protobuf:"bytes,13,opt,name=custom_content_type,json=customContentType,proto3" json:"custom_content_type,omitempty"`
+ // Boolean specifying whether to include SSL certificate validation as a
+ // part of the Uptime check. Only applies to checks where
+ // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`,
+ // setting `validate_ssl` to `true` has no effect.
+ ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"`
+ // The request body associated with the HTTP POST request. If `content_type`
+ // is `URL_ENCODED`, the body passed in must be URL-encoded. Users can
+ // provide a `Content-Length` header via the `headers` field or the API will
+ // do so. If the `request_method` is `GET` and `body` is not empty, the API
+ // will return an error. The maximum byte size is 1 megabyte.
+ //
+ // Note: If client libraries aren't used (which performs the conversion
+ // automatically) base64 encode your `body` data since the field is of
+ // `bytes` type.
+ Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"`
+ // If present, the check will only pass if the HTTP response status code is
+ // in this set of status codes. If empty, the HTTP status code will only
+ // pass if the HTTP status code is 200-299.
+ AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"`
+ // Contains information needed to add pings to an HTTP check.
+ PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"`
+ // This field is optional and should be set only by users interested in
+ // an authenticated uptime check.
+ // Do not set both `auth_method` and `auth_info`.
+ //
+ // Types that are assignable to AuthMethod:
+ //
+ // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_
+ AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck) Reset() {
+ *x = UptimeCheckConfig_HttpCheck{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2}
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod {
+ if x != nil {
+ return x.RequestMethod
+ }
+ return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool {
+ if x != nil {
+ return x.UseSsl
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication {
+ if x != nil {
+ return x.AuthInfo
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool {
+ if x != nil {
+ return x.MaskHeaders
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType {
+ if x != nil {
+ return x.ContentType
+ }
+ return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetCustomContentType() string {
+ if x != nil {
+ return x.CustomContentType
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool {
+ if x != nil {
+ return x.ValidateSsl
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetAcceptedResponseStatusCodes() []*UptimeCheckConfig_HttpCheck_ResponseStatusCode {
+ if x != nil {
+ return x.AcceptedResponseStatusCodes
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig {
+ if x != nil {
+ return x.PingConfig
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod {
+ if m != nil {
+ return m.AuthMethod
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication {
+ if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok {
+ return x.ServiceAgentAuthentication
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_HttpCheck_AuthMethod interface {
+ isUptimeCheckConfig_HttpCheck_AuthMethod()
+}
+
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct {
+ // If specified, Uptime will generate and attach an OIDC JWT token for the
+ // Monitoring service agent service account as an `Authorization` header
+ // in the HTTP request when probing.
+ ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() {
+}
+
+// Information required for a TCP Uptime check request.
+type UptimeCheckConfig_TcpCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The TCP port on the server against which to run the check. Will be
+ // combined with host (specified within the `monitored_resource`) to
+ // construct the full URL. Required.
+ Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+ // Contains information needed to add pings to a TCP check.
+ PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,2,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"`
+}
+
+func (x *UptimeCheckConfig_TcpCheck) Reset() {
+ *x = UptimeCheckConfig_TcpCheck{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_TcpCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3}
+}
+
+func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_TcpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig {
+ if x != nil {
+ return x.PingConfig
+ }
+ return nil
+}
+
+// Optional. Used to perform content matching. This allows matching based on
+// substrings and regular expressions, together with their negations. Only the
+// first 4 MB of an HTTP or HTTPS check's response (and the first
+// 1 MB of a TCP check's response) are examined for purposes of content
+// matching.
+type UptimeCheckConfig_ContentMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // String, regex or JSON content to match. Maximum 1024 bytes. An empty
+ // `content` string indicates no content matching is to be performed.
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The type of content matcher that will be applied to the server output,
+ // compared to the `content` string when the check is run.
+ Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"`
+ // Certain `ContentMatcherOption` types require additional information.
+ // `MATCHES_JSON_PATH` or `NOT_MATCHES_JSON_PATH` require a
+ // `JsonPathMatcher`; not used for other options.
+ //
+ // Types that are assignable to AdditionalMatcherInfo:
+ //
+ // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_
+ AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"`
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) Reset() {
+ *x = UptimeCheckConfig_ContentMatcher{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4}
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption {
+ if x != nil {
+ return x.Matcher
+ }
+ return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED
+}
+
+func (m *UptimeCheckConfig_ContentMatcher) GetAdditionalMatcherInfo() isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo {
+ if m != nil {
+ return m.AdditionalMatcherInfo
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetJsonPathMatcher() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher {
+ if x, ok := x.GetAdditionalMatcherInfo().(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_); ok {
+ return x.JsonPathMatcher
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo interface {
+ isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo()
+}
+
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ struct {
+ // Matcher information for `MATCHES_JSON_PATH` and `NOT_MATCHES_JSON_PATH`
+ JsonPathMatcher *UptimeCheckConfig_ContentMatcher_JsonPathMatcher `protobuf:"bytes,3,opt,name=json_path_matcher,json=jsonPathMatcher,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_) isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() {
+}
+
+// The authentication parameters to provide to the specified resource or
+// URL that requires a username and password. Currently, only
+// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is
+// supported in Uptime checks.
+type UptimeCheckConfig_HttpCheck_BasicAuthentication struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The username to use when authenticating with the HTTP server.
+ Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
+ // The password to use when authenticating with the HTTP server.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0}
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string {
+ if x != nil {
+ return x.Password
+ }
+ return ""
+}
+
+// A status to accept. Either a status code class like "2xx", or an integer
+// status code like "200".
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Either a specific value or a class of status codes.
+ //
+ // Types that are assignable to StatusCode:
+ //
+ // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue
+ // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_
+ StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1}
+}
+
+func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode {
+ if m != nil {
+ return m.StatusCode
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusValue() int32 {
+ if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue); ok {
+ return x.StatusValue
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusClass() UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass {
+ if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_); ok {
+ return x.StatusClass
+ }
+ return UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED
+}
+
+type isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode interface {
+ isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode()
+}
+
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue struct {
+ // A status code to accept.
+ StatusValue int32 `protobuf:"varint,1,opt,name=status_value,json=statusValue,proto3,oneof"`
+}
+
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ struct {
+ // A class of status codes to accept.
+ StatusClass UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass `protobuf:"varint,2,opt,name=status_class,json=statusClass,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() {
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() {
+}
+
+// Contains information needed for generating either an
+// [OpenID Connect
+// token](https://developers.google.com/identity/protocols/OpenIDConnect) or
+// [OAuth token](https://developers.google.com/identity/protocols/oauth2).
+// The token will be generated for the Monitoring service agent service
+// account.
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of authentication.
+ Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2}
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType {
+ if x != nil {
+ return x.Type
+ }
+ return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED
+}
+
+// Information needed to perform a JSONPath content match.
+// Used for `ContentMatcherOption::MATCHES_JSON_PATH` and
+// `ContentMatcherOption::NOT_MATCHES_JSON_PATH`.
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // JSONPath within the response output pointing to the expected
+ // `ContentMatcher::content` to match against.
+ JsonPath string `protobuf:"bytes,1,opt,name=json_path,json=jsonPath,proto3" json:"json_path,omitempty"`
+ // The type of JSONPath match that will be applied to the JSON output
+ // (`ContentMatcher.content`)
+ JsonMatcher UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption `protobuf:"varint,2,opt,name=json_matcher,json=jsonMatcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption" json:"json_matcher,omitempty"`
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() {
+ *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0}
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string {
+ if x != nil {
+ return x.JsonPath
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonMatcher() UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption {
+ if x != nil {
+ return x.JsonMatcher
+ }
+ return UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED
+}
+
+var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x19, 0x0a, 0x08,
+ 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12,
+ 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08,
+ 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55,
+ 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, 0x02, 0x0a, 0x16,
+ 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74,
+ 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
+ 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e,
+ 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48,
+ 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69,
+ 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48,
+ 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x08, 0x74, 0x63,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61,
+ 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x6c,
+ 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a,
+ 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x75, 0x73,
+ 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64,
+ 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x2d,
+ 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xef, 0x0e,
+ 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, 0x0a, 0x0e, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04,
+ 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63,
+ 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x6b,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x89,
+ 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65,
+ 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x1b, 0x61,
+ 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69,
+ 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65,
+ 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65,
+ 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
+ 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00,
+ 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x75, 0x0a,
+ 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43,
+ 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41,
+ 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54,
+ 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, 0xc8, 0x01, 0x12,
+ 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f,
+ 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53,
+ 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, 0x12, 0x15, 0x0a,
+ 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x35, 0x58,
+ 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43,
+ 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, 0x0a, 0x0b, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, 0x0a, 0x1a, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65,
+ 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, 0x1e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x2d,
+ 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x55,
+ 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x10, 0x01, 0x1a,
+ 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12,
+ 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a,
+ 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x11, 0x0a,
+ 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x02,
+ 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a,
+ 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x70,
+ 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12,
+ 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69,
+ 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x84, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x11, 0x6a, 0x73, 0x6f, 0x6e,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73, 0x6f, 0x6e,
+ 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x94,
+ 0x02, 0x0a, 0x0f, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12,
+ 0x7f, 0x0a, 0x0c, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73,
+ 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73,
+ 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x22, 0x63, 0x0a, 0x15, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x24, 0x4a, 0x53, 0x4f,
+ 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x52, 0x5f, 0x4f,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x41, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54,
+ 0x43, 0x48, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x47, 0x45, 0x58, 0x5f, 0x4d, 0x41,
+ 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26,
+ 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45,
+ 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49,
+ 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49,
+ 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f,
+ 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x54, 0x5f, 0x4d,
+ 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x04, 0x12, 0x15,
+ 0x0a, 0x11, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50,
+ 0x41, 0x54, 0x48, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x41, 0x54,
+ 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x10, 0x06,
+ 0x42, 0x19, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x3d, 0x0a, 0x0f, 0x55,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x55, 0x0a, 0x0b, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x45,
+ 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x49,
+ 0x43, 0x5f, 0x49, 0x50, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12,
+ 0x10, 0x0a, 0x0c, 0x56, 0x50, 0x43, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, 0x10,
+ 0x03, 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
+ 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65,
+ 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, 0x0a, 0x06, 0x72,
+ 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a,
+ 0x12, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x4f,
+ 0x55, 0x54, 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, 0x12, 0x10, 0x0a,
+ 0x0c, 0x41, 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, 0x10, 0x04, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x5f, 0x4f, 0x52, 0x45, 0x47, 0x4f, 0x4e, 0x10, 0x05, 0x12,
+ 0x0c, 0x0a, 0x08, 0x55, 0x53, 0x41, 0x5f, 0x49, 0x4f, 0x57, 0x41, 0x10, 0x06, 0x12, 0x10, 0x0a,
+ 0x0c, 0x55, 0x53, 0x41, 0x5f, 0x56, 0x49, 0x52, 0x47, 0x49, 0x4e, 0x49, 0x41, 0x10, 0x07, 0x2a,
+ 0x5b, 0x0a, 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10,
+ 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, 0x4c, 0x4f, 0x41,
+ 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, 0xaf, 0x02, 0xea,
+ 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56,
+ 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02,
+ 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc
+)
+
+func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_uptime_proto_rawDescData
+}
+
+var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
+var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_monitoring_v3_uptime_proto_goTypes = []any{
+ (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion
+ (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType
+ (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State
+ (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType
+ (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod
+ (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType
+ (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass
+ (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType
+ (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption
+ (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption
+ (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker
+ (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget
+ (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig
+ (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp
+ (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target
+ (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup
+ (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig
+ (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck
+ (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck
+ (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher
+ nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry
+ (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode
+ (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication
+ nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry
+ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher
+ (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource
+ (*durationpb.Duration)(nil), // 27: google.protobuf.Duration
+}
+var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{
+ 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State
+ 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target
+ 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource
+ 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup
+ 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget
+ 17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck
+ 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck
+ 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration
+ 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration
+ 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher
+ 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType
+ 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion
+ 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker
+ 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry
+ 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion
+ 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource
+ 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType
+ 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod
+ 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication
+ 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry
+ 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType
+ 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode
+ 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig
+ 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication
+ 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig
+ 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption
+ 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher
+ 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass
+ 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType
+ 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_uptime_proto_init() }
+func file_google_monitoring_v3_uptime_proto_init() {
+ if File_google_monitoring_v3_uptime_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{
+ (*SyntheticMonitorTarget_CloudFunctionV2)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{
+ (*UptimeCheckConfig_MonitoredResource)(nil),
+ (*UptimeCheckConfig_ResourceGroup_)(nil),
+ (*UptimeCheckConfig_SyntheticMonitor)(nil),
+ (*UptimeCheckConfig_HttpCheck_)(nil),
+ (*UptimeCheckConfig_TcpCheck_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{
+ (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{
+ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil),
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc,
+ NumEnums: 10,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_uptime_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_uptime_proto = out.File
+ file_google_monitoring_v3_uptime_proto_rawDesc = nil
+ file_google_monitoring_v3_uptime_proto_goTypes = nil
+ file_google_monitoring_v3_uptime_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
new file mode 100644
index 000000000..9ea159bbd
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
@@ -0,0 +1,1112 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/uptime_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The protocol for the `ListUptimeCheckConfigs` request.
+type ListUptimeCheckConfigsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // Uptime check configurations are listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // uptime checks to be included in the response.
+ //
+ // For more details, see [Filtering
+ // syntax](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax).
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckConfigsRequest) Reset() {
+ *x = ListUptimeCheckConfigsRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckConfigsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckConfigsRequest) ProtoMessage() {}
+
+func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckConfigs` response.
+type ListUptimeCheckConfigsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned Uptime check configurations.
+ UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of Uptime check configurations for the project,
+ // irrespective of any pagination.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListUptimeCheckConfigsResponse) Reset() {
+ *x = ListUptimeCheckConfigsResponse{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckConfigsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckConfigsResponse) ProtoMessage() {}
+
+func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfigs
+ }
+ return nil
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The protocol for the `GetUptimeCheckConfig` request.
+type GetUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Uptime check configuration to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetUptimeCheckConfigRequest) Reset() {
+ *x = GetUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetUptimeCheckConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `CreateUptimeCheckConfig` request.
+type CreateUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the Uptime check. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The new Uptime check configuration.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+}
+
+func (x *CreateUptimeCheckConfigRequest) Reset() {
+ *x = CreateUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateUptimeCheckConfigRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `UpdateUptimeCheckConfig` request.
+type UpdateUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. If present, only the listed fields in the current Uptime check
+ // configuration are updated with values from the new configuration. If this
+ // field is empty, then the current configuration is completely replaced with
+ // the new configuration.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. If an `updateMask` has been specified, this field gives
+ // the values for the set of fields mentioned in the `updateMask`. If an
+ // `updateMask` has not been given, this Uptime check configuration replaces
+ // the current configuration. If a field is mentioned in `updateMask` but
+ // the corresponding field is omitted in this partial Uptime check
+ // configuration, it has the effect of deleting/clearing the field from the
+ // configuration on the server.
+ //
+ // The following fields can be updated: `display_name`,
+ // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and
+ // `selected_regions`.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+}
+
+func (x *UpdateUptimeCheckConfigRequest) Reset() {
+ *x = UpdateUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `DeleteUptimeCheckConfig` request.
+type DeleteUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Uptime check configuration to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteUptimeCheckConfigRequest) Reset() {
+ *x = DeleteUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteUptimeCheckConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` request.
+type ListUptimeCheckIpsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ // NOTE: this field is not yet implemented
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ // NOTE: this field is not yet implemented
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckIpsRequest) Reset() {
+ *x = ListUptimeCheckIpsRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckIpsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckIpsRequest) ProtoMessage() {}
+
+func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListUptimeCheckIpsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` response.
+type ListUptimeCheckIpsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned list of IP addresses (including region and location) that the
+ // checkers run from.
+ UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ // NOTE: this field is not yet implemented
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckIpsResponse) Reset() {
+ *x = ListUptimeCheckIpsResponse{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckIpsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckIpsResponse) ProtoMessage() {}
+
+func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp {
+ if x != nil {
+ return x.UptimeCheckIps
+ }
+ return nil
+}
+
+func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
+ 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69,
+ 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66,
+ 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a,
+ 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
+ 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x49, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a,
+ 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x39, 0xda, 0x41, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0x64, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x2a, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x71, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x55,
+ 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
+ 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c,
+ 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70,
+ 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76,
+ 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73,
+ 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2,
+ 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
+ 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74,
+ 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a,
+ 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f,
+ 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_uptime_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{
+ (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest
+ (*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse
+ (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest
+ (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest
+ (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest
+ (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest
+ (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest
+ (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse
+ (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig
+ (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask
+ (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp
+ (*emptypb.Empty)(nil), // 11: google.protobuf.Empty
+}
+var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{
+ 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp
+ 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest
+ 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest
+ 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest
+ 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest
+ 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest
+ 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest
+ 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse
+ 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty
+ 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse
+ 11, // [11:17] is the sub-list for method output_type
+ 5, // [5:11] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_uptime_service_proto_init() }
+func file_google_monitoring_v3_uptime_service_proto_init() {
+ if File_google_monitoring_v3_uptime_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_uptime_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_uptime_service_proto = out.File
+ file_google_monitoring_v3_uptime_service_proto_rawDesc = nil
+ file_google_monitoring_v3_uptime_service_proto_goTypes = nil
+ file_google_monitoring_v3_uptime_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// UptimeCheckServiceClient is the client API for UptimeCheckService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type UptimeCheckServiceClient interface {
+ // Lists the existing valid Uptime check configurations for the project
+ // (leaving out any invalid configurations).
+ ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single Uptime check configuration.
+ GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Creates a new Uptime check configuration.
+ CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Updates an Uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `updateMask`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Deletes an Uptime check configuration. Note that this method will fail
+ // if the Uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Returns the list of IP addresses that checkers run from.
+ ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error)
+}
+
+type uptimeCheckServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient {
+ return &uptimeCheckServiceClient{cc}
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) {
+ out := new(ListUptimeCheckConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) {
+ out := new(ListUptimeCheckIpsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// UptimeCheckServiceServer is the server API for UptimeCheckService service.
+type UptimeCheckServiceServer interface {
+ // Lists the existing valid Uptime check configurations for the project
+ // (leaving out any invalid configurations).
+ ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single Uptime check configuration.
+ GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Creates a new Uptime check configuration.
+ CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Updates an Uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `updateMask`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Deletes an Uptime check configuration. Note that this method will fail
+ // if the Uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error)
+ // Returns the list of IP addresses that checkers run from.
+ ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error)
+}
+
+// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedUptimeCheckServiceServer struct {
+}
+
+func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented")
+}
+
+func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) {
+ s.RegisterService(&_UptimeCheckService_serviceDesc, srv)
+}
+
+func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckIpsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.UptimeCheckService",
+ HandlerType: (*UptimeCheckServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListUptimeCheckConfigs",
+ Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler,
+ },
+ {
+ MethodName: "GetUptimeCheckConfig",
+ Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "CreateUptimeCheckConfig",
+ Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "UpdateUptimeCheckConfig",
+ Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "DeleteUptimeCheckConfig",
+ Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "ListUptimeCheckIps",
+ Handler: _UptimeCheckService_ListUptimeCheckIps_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/uptime_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
new file mode 100644
index 000000000..3b36b219e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
@@ -0,0 +1,622 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newNotificationChannelClientHook clientHook
+
+// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
+type NotificationChannelCallOptions struct {
+ ListNotificationChannelDescriptors []gax.CallOption
+ GetNotificationChannelDescriptor []gax.CallOption
+ ListNotificationChannels []gax.CallOption
+ GetNotificationChannel []gax.CallOption
+ CreateNotificationChannel []gax.CallOption
+ UpdateNotificationChannel []gax.CallOption
+ DeleteNotificationChannel []gax.CallOption
+ SendNotificationChannelVerificationCode []gax.CallOption
+ GetNotificationChannelVerificationCode []gax.CallOption
+ VerifyNotificationChannel []gax.CallOption
+}
+
+func defaultNotificationChannelGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
+ return &NotificationChannelCallOptions{
+ ListNotificationChannelDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetNotificationChannelDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListNotificationChannels: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ SendNotificationChannelVerificationCode: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetNotificationChannelVerificationCode: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ VerifyNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalNotificationChannelClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator
+ GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error)
+ ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator
+ GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error
+ SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error
+ GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error)
+ VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+}
+
+// NotificationChannelClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+type NotificationChannelClient struct {
+ // The internal transport-dependent client.
+ internalClient internalNotificationChannelClient
+
+ // The call options for this service.
+ CallOptions *NotificationChannelCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *NotificationChannelClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
+// makes it possible for new channel types to be dynamically added.
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...)
+}
+
+// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
+// are expected / permitted for a notification channel of the given type.
+func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...)
+}
+
+// ListNotificationChannels lists the notification channels that have been created for the project.
+// To list the types of notification channels that are supported, use
+// the ListNotificationChannelDescriptors method.
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ return c.internalClient.ListNotificationChannels(ctx, req, opts...)
+}
+
+// GetNotificationChannel gets a single notification channel. The channel includes the relevant
+// configuration details with which the channel was created. However, the
+// response may truncate or omit passwords, API keys, or other private key
+// matter and thus the response may not be 100% identical to the information
+// that was supplied in the call to the create method.
+func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.GetNotificationChannel(ctx, req, opts...)
+}
+
+// CreateNotificationChannel creates a new notification channel, representing a single notification
+// endpoint such as an email address, SMS number, or PagerDuty service.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.CreateNotificationChannel(ctx, req, opts...)
+}
+
+// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
+// remain unchanged.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.UpdateNotificationChannel(ctx, req, opts...)
+}
+
+// DeleteNotificationChannel deletes a notification channel.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteNotificationChannel(ctx, req, opts...)
+}
+
+// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code
+// can then be supplied in VerifyNotificationChannel to verify the channel.
+func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
+ return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...)
+}
+
+// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then
+// be used in a call to VerifyNotificationChannel() on a different channel
+// with an equivalent identity in the same or in a different project. This
+// makes it possible to copy a channel between projects without requiring
+// manual reverification of the channel. If the channel is not in the
+// verified state, this method will fail (in other words, this may only be
+// used if the SendNotificationChannelVerificationCode and
+// VerifyNotificationChannel paths have already been used to put the given
+// channel into the verified state).
+//
+// There is no guarantee that the verification codes returned by this method
+// will be of a similar structure or form as the ones that are delivered
+// to the channel via SendNotificationChannelVerificationCode; while
+// VerifyNotificationChannel() will recognize both the codes delivered via
+// SendNotificationChannelVerificationCode() and returned from
+// GetNotificationChannelVerificationCode(), it is typically the case that
+// the verification codes delivered via
+// SendNotificationChannelVerificationCode() will be shorter and also
+// have a shorter expiration (e.g. codes such as “G-123456”) whereas
+// GetVerificationCode() will typically return a much longer, websafe base
+// 64 encoded string that has a longer expiration time.
+func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
+ return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...)
+}
+
+// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code
+// delivered to the channel as a result of calling
+// SendNotificationChannelVerificationCode.
+func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.VerifyNotificationChannel(ctx, req, opts...)
+}
+
+// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type notificationChannelGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing NotificationChannelClient
+ CallOptions **NotificationChannelCallOptions
+
+ // The gRPC API client.
+ notificationChannelClient monitoringpb.NotificationChannelServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewNotificationChannelClient creates a new notification channel service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
+ clientOpts := defaultNotificationChannelGRPCClientOptions()
+ if newNotificationChannelClientHook != nil {
+ hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()}
+
+ c := ¬ificationChannelGRPCClient{
+ connPool: connPool,
+ notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *notificationChannelGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
+ resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannelDescriptors, req, settings.GRPC, c.logger, "ListNotificationChannelDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...)
+ var resp *monitoringpb.NotificationChannelDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelDescriptor, req, settings.GRPC, c.logger, "GetNotificationChannelDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...)
+ it := &NotificationChannelIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
+ resp := &monitoringpb.ListNotificationChannelsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannels, req, settings.GRPC, c.logger, "ListNotificationChannels")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannel, req, settings.GRPC, c.logger, "GetNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.CreateNotificationChannel, req, settings.GRPC, c.logger, "CreateNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.UpdateNotificationChannel, req, settings.GRPC, c.logger, "UpdateNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.notificationChannelClient.DeleteNotificationChannel, req, settings.GRPC, c.logger, "DeleteNotificationChannel")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.notificationChannelClient.SendNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "SendNotificationChannelVerificationCode")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...)
+ var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "GetNotificationChannelVerificationCode")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.VerifyNotificationChannel, req, settings.GRPC, c.logger, "VerifyNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
new file mode 100644
index 000000000..f792f2bd7
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+var newQueryClientHook clientHook
+
+// QueryCallOptions contains the retry settings for each method of QueryClient.
+type QueryCallOptions struct {
+ QueryTimeSeries []gax.CallOption
+}
+
+func defaultQueryGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultQueryCallOptions() *QueryCallOptions {
+ return &QueryCallOptions{
+ QueryTimeSeries: []gax.CallOption{},
+ }
+}
+
+// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalQueryClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator
+}
+
+// QueryClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The QueryService API is used to manage time series data in Cloud
+// Monitoring. Time series data is a collection of data points that describes
+// the time-varying values of a metric.
+type QueryClient struct {
+ // The internal transport-dependent client.
+ internalClient internalQueryClient
+
+ // The call options for this service.
+ CallOptions *QueryCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *QueryClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *QueryClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *QueryClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// QueryTimeSeries queries time series by using Monitoring Query Language (MQL). We recommend
+// using PromQL instead of MQL. For more information about the status of MQL,
+// see the MQL deprecation
+// notice (at https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: QueryTimeSeries may be removed in a future version.
+func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
+ return c.internalClient.QueryTimeSeries(ctx, req, opts...)
+}
+
+// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type queryGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing QueryClient
+ CallOptions **QueryCallOptions
+
+ // The gRPC API client.
+ queryClient monitoringpb.QueryServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewQueryClient creates a new query service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The QueryService API is used to manage time series data in Cloud
+// Monitoring. Time series data is a collection of data points that describes
+// the time-varying values of a metric.
+func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) {
+ clientOpts := defaultQueryGRPCClientOptions()
+ if newQueryClientHook != nil {
+ hookOpts, err := newQueryClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := QueryClient{CallOptions: defaultQueryCallOptions()}
+
+ c := &queryGRPCClient{
+ connPool: connPool,
+ queryClient: monitoringpb.NewQueryServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *queryGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *queryGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...)
+ it := &TimeSeriesDataIterator{}
+ req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) {
+ resp := &monitoringpb.QueryTimeSeriesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.queryClient.QueryTimeSeries, req, settings.GRPC, c.logger, "QueryTimeSeries")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
new file mode 100644
index 000000000..7dc66e373
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
@@ -0,0 +1,569 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newServiceMonitoringClientHook clientHook
+
+// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient.
+type ServiceMonitoringCallOptions struct {
+ CreateService []gax.CallOption
+ GetService []gax.CallOption
+ ListServices []gax.CallOption
+ UpdateService []gax.CallOption
+ DeleteService []gax.CallOption
+ CreateServiceLevelObjective []gax.CallOption
+ GetServiceLevelObjective []gax.CallOption
+ ListServiceLevelObjectives []gax.CallOption
+ UpdateServiceLevelObjective []gax.CallOption
+ DeleteServiceLevelObjective []gax.CallOption
+}
+
+func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions {
+ return &ServiceMonitoringCallOptions{
+ CreateService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListServices: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListServiceLevelObjectives: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalServiceMonitoringClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator
+ UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error
+ CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator
+ UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error
+}
+
+// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
+// managing and querying aspects of a Metrics Scope’s services. These include
+// the Service's monitored resources, its Service-Level Objectives, and a
+// taxonomy of categorized Health Metrics.
+type ServiceMonitoringClient struct {
+ // The internal transport-dependent client.
+ internalClient internalServiceMonitoringClient
+
+ // The call options for this service.
+ CallOptions *ServiceMonitoringCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *ServiceMonitoringClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// CreateService create a Service.
+func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.CreateService(ctx, req, opts...)
+}
+
+// GetService get the named Service.
+func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.GetService(ctx, req, opts...)
+}
+
+// ListServices list Services for this Metrics Scope.
+func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
+ return c.internalClient.ListServices(ctx, req, opts...)
+}
+
+// UpdateService update this Service.
+func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.UpdateService(ctx, req, opts...)
+}
+
+// DeleteService soft delete this Service.
+func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteService(ctx, req, opts...)
+}
+
+// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service.
+func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...)
+}
+
+// GetServiceLevelObjective get a ServiceLevelObjective by name.
+func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.GetServiceLevelObjective(ctx, req, opts...)
+}
+
+// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service.
+func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
+ return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...)
+}
+
+// UpdateServiceLevelObjective update the given ServiceLevelObjective.
+func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...)
+}
+
+// DeleteServiceLevelObjective delete the given ServiceLevelObjective.
+func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...)
+}
+
+// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type serviceMonitoringGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing ServiceMonitoringClient
+ CallOptions **ServiceMonitoringCallOptions
+
+ // The gRPC API client.
+ serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
+// managing and querying aspects of a Metrics Scope’s services. These include
+// the Service's monitored resources, its Service-Level Objectives, and a
+// taxonomy of categorized Health Metrics.
+func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) {
+ clientOpts := defaultServiceMonitoringGRPCClientOptions()
+ if newServiceMonitoringClientHook != nil {
+ hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()}
+
+ c := &serviceMonitoringGRPCClient{
+ connPool: connPool,
+ serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *serviceMonitoringGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateService, req, settings.GRPC, c.logger, "CreateService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetService, req, settings.GRPC, c.logger, "GetService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...)
+ it := &ServiceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListServicesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) {
+ resp := &monitoringpb.ListServicesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServices, req, settings.GRPC, c.logger, "ListServices")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetServices(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateService, req, settings.GRPC, c.logger, "UpdateService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteService, req, settings.GRPC, c.logger, "DeleteService")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateServiceLevelObjective, req, settings.GRPC, c.logger, "CreateServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetServiceLevelObjective, req, settings.GRPC, c.logger, "GetServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...)
+ it := &ServiceLevelObjectiveIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) {
+ resp := &monitoringpb.ListServiceLevelObjectivesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServiceLevelObjectives, req, settings.GRPC, c.logger, "ListServiceLevelObjectives")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateServiceLevelObjective, req, settings.GRPC, c.logger, "UpdateServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteServiceLevelObjective, req, settings.GRPC, c.logger, "DeleteServiceLevelObjective")
+ return err
+ }, opts...)
+ return err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
new file mode 100644
index 000000000..5b76a486b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
@@ -0,0 +1,347 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newSnoozeClientHook clientHook
+
+// SnoozeCallOptions contains the retry settings for each method of SnoozeClient.
+type SnoozeCallOptions struct {
+ CreateSnooze []gax.CallOption
+ ListSnoozes []gax.CallOption
+ GetSnooze []gax.CallOption
+ UpdateSnooze []gax.CallOption
+}
+
+func defaultSnoozeGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultSnoozeCallOptions() *SnoozeCallOptions {
+ return &SnoozeCallOptions{
+ CreateSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ ListSnoozes: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ }
+}
+
+// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalSnoozeClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+ ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator
+ GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+ UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+}
+
+// SnoozeClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The SnoozeService API is used to temporarily prevent an alert policy from
+// generating alerts. A Snooze is a description of the criteria under which one
+// or more alert policies should not fire alerts for the specified duration.
+type SnoozeClient struct {
+ // The internal transport-dependent client.
+ internalClient internalSnoozeClient
+
+ // The call options for this service.
+ CallOptions *SnoozeCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *SnoozeClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *SnoozeClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// CreateSnooze creates a Snooze that will prevent alerts, which match the provided
+// criteria, from being opened. The Snooze applies for a specific time
+// interval.
+func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.CreateSnooze(ctx, req, opts...)
+}
+
+// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in
+// filter, which specifies predicates to match Snoozes.
+func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
+ return c.internalClient.ListSnoozes(ctx, req, opts...)
+}
+
+// GetSnooze retrieves a Snooze by name.
+func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.GetSnooze(ctx, req, opts...)
+}
+
+// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the
+// given Snooze object.
+func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.UpdateSnooze(ctx, req, opts...)
+}
+
+// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type snoozeGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing SnoozeClient
+ CallOptions **SnoozeCallOptions
+
+ // The gRPC API client.
+ snoozeClient monitoringpb.SnoozeServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewSnoozeClient creates a new snooze service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The SnoozeService API is used to temporarily prevent an alert policy from
+// generating alerts. A Snooze is a description of the criteria under which one
+// or more alert policies should not fire alerts for the specified duration.
+func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) {
+ clientOpts := defaultSnoozeGRPCClientOptions()
+ if newSnoozeClientHook != nil {
+ hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()}
+
+ c := &snoozeGRPCClient{
+ connPool: connPool,
+ snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *snoozeGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *snoozeGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.CreateSnooze, req, settings.GRPC, c.logger, "CreateSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...)
+ it := &SnoozeIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) {
+ resp := &monitoringpb.ListSnoozesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.ListSnoozes, req, settings.GRPC, c.logger, "ListSnoozes")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetSnoozes(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.GetSnooze, req, settings.GRPC, c.logger, "GetSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.UpdateSnooze, req, settings.GRPC, c.logger, "UpdateSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
new file mode 100644
index 000000000..df0ec2957
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
@@ -0,0 +1,454 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newUptimeCheckClientHook clientHook
+
+// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient.
+type UptimeCheckCallOptions struct {
+ ListUptimeCheckConfigs []gax.CallOption
+ GetUptimeCheckConfig []gax.CallOption
+ CreateUptimeCheckConfig []gax.CallOption
+ UpdateUptimeCheckConfig []gax.CallOption
+ DeleteUptimeCheckConfig []gax.CallOption
+ ListUptimeCheckIps []gax.CallOption
+}
+
+func defaultUptimeCheckGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions {
+ return &UptimeCheckCallOptions{
+ ListUptimeCheckConfigs: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListUptimeCheckIps: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalUptimeCheckClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator
+ GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error
+ ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator
+}
+
+// UptimeCheckClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// Uptime check configurations in the Cloud Monitoring product. An Uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud console]
+// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
+// clicking on “Monitoring” on the left-hand side to navigate to Cloud
+// Monitoring, and then clicking on “Uptime”.
+type UptimeCheckClient struct {
+ // The internal transport-dependent client.
+ internalClient internalUptimeCheckClient
+
+ // The call options for this service.
+ CallOptions *UptimeCheckCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *UptimeCheckClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *UptimeCheckClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project
+// (leaving out any invalid configurations).
+func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...)
+}
+
+// GetUptimeCheckConfig gets a single Uptime check configuration.
+func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...)
+}
+
+// CreateUptimeCheckConfig creates a new Uptime check configuration.
+func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...)
+}
+
+// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire
+// configuration with a new one or replace only certain fields in the current
+// configuration by specifying the fields to be updated via updateMask.
+// Returns the updated configuration.
+func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...)
+}
+
+// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail
+// if the Uptime check configuration is referenced by an alert policy or
+// other dependent configs that would be rendered invalid by the deletion.
+func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...)
+}
+
+// ListUptimeCheckIps returns the list of IP addresses that checkers run from.
+func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ return c.internalClient.ListUptimeCheckIps(ctx, req, opts...)
+}
+
+// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type uptimeCheckGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing UptimeCheckClient
+ CallOptions **UptimeCheckCallOptions
+
+ // The gRPC API client.
+ uptimeCheckClient monitoringpb.UptimeCheckServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewUptimeCheckClient creates a new uptime check service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// Uptime check configurations in the Cloud Monitoring product. An Uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud console]
+// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
+// clicking on “Monitoring” on the left-hand side to navigate to Cloud
+// Monitoring, and then clicking on “Uptime”.
+func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
+ clientOpts := defaultUptimeCheckGRPCClientOptions()
+ if newUptimeCheckClientHook != nil {
+ hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()}
+
+ c := &uptimeCheckGRPCClient{
+ connPool: connPool,
+ uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *uptimeCheckGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...)
+ it := &UptimeCheckConfigIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) {
+ resp := &monitoringpb.ListUptimeCheckConfigsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckConfigs, req, settings.GRPC, c.logger, "ListUptimeCheckConfigs")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.GetUptimeCheckConfig, req, settings.GRPC, c.logger, "GetUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.CreateUptimeCheckConfig, req, settings.GRPC, c.logger, "CreateUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.UpdateUptimeCheckConfig, req, settings.GRPC, c.logger, "UpdateUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.uptimeCheckClient.DeleteUptimeCheckConfig, req, settings.GRPC, c.logger, "DeleteUptimeCheckConfig")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
+ opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...)
+ it := &UptimeCheckIpIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) {
+ resp := &monitoringpb.ListUptimeCheckIpsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckIps, req, settings.GRPC, c.logger, "ListUptimeCheckIps")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
new file mode 100644
index 000000000..accff0f5e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
@@ -0,0 +1,23 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+package monitoring
+
+import "cloud.google.com/go/monitoring/internal"
+
+func init() {
+ versionClient = internal.Version
+}
diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go
new file mode 100644
index 000000000..3e6c62c92
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/internal/version.go
@@ -0,0 +1,18 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "1.24.1"
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json
new file mode 100644
index 000000000..529f7db35
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config-individual.json
@@ -0,0 +1,54 @@
+{
+ "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
+ "release-type": "go-yoshi",
+ "include-component-in-tag": true,
+ "separate-pull-requests": true,
+ "tag-separator": "/",
+ "packages": {
+ "auth": {
+ "component": "auth"
+ },
+ "auth/oauth2adapt": {
+ "component": "auth/oauth2adapt"
+ },
+ "bigquery": {
+ "component": "bigquery"
+ },
+ "bigtable": {
+ "component": "bigtable"
+ },
+ "datastore": {
+ "component": "datastore"
+ },
+ "errorreporting": {
+ "component": "errorreporting"
+ },
+ "firestore": {
+ "component": "firestore"
+ },
+ "logging": {
+ "component": "logging"
+ },
+ "profiler": {
+ "component": "profiler"
+ },
+ "pubsub": {
+ "component": "pubsub"
+ },
+ "pubsublite": {
+ "component": "pubsublite"
+ },
+ "spanner": {
+ "component": "spanner"
+ },
+ "storage": {
+ "component": "storage"
+ },
+ "vertexai": {
+ "component": "vertexai"
+ }
+ },
+ "plugins": [
+ "sentence-case"
+ ]
+}
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
new file mode 100644
index 000000000..4d8d236ed
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -0,0 +1,472 @@
+{
+ "release-type": "go-yoshi",
+ "include-component-in-tag": true,
+ "tag-separator": "/",
+ "packages": {
+ "accessapproval": {
+ "component": "accessapproval"
+ },
+ "accesscontextmanager": {
+ "component": "accesscontextmanager"
+ },
+ "advisorynotifications": {
+ "component": "advisorynotifications"
+ },
+ "ai": {
+ "component": "ai"
+ },
+ "aiplatform": {
+ "component": "aiplatform"
+ },
+ "alloydb": {
+ "component": "alloydb"
+ },
+ "analytics": {
+ "component": "analytics"
+ },
+ "apigateway": {
+ "component": "apigateway"
+ },
+ "apigeeconnect": {
+ "component": "apigeeconnect"
+ },
+ "apigeeregistry": {
+ "component": "apigeeregistry"
+ },
+ "apihub": {
+ "component": "apihub"
+ },
+ "apikeys": {
+ "component": "apikeys"
+ },
+ "appengine": {
+ "component": "appengine"
+ },
+ "apphub": {
+ "component": "apphub"
+ },
+ "apps": {
+ "component": "apps"
+ },
+ "area120": {
+ "component": "area120"
+ },
+ "artifactregistry": {
+ "component": "artifactregistry"
+ },
+ "asset": {
+ "component": "asset"
+ },
+ "assuredworkloads": {
+ "component": "assuredworkloads"
+ },
+ "automl": {
+ "component": "automl"
+ },
+ "backupdr": {
+ "component": "backupdr"
+ },
+ "baremetalsolution": {
+ "component": "baremetalsolution"
+ },
+ "batch": {
+ "component": "batch"
+ },
+ "beyondcorp": {
+ "component": "beyondcorp"
+ },
+ "billing": {
+ "component": "billing"
+ },
+ "binaryauthorization": {
+ "component": "binaryauthorization"
+ },
+ "certificatemanager": {
+ "component": "certificatemanager"
+ },
+ "channel": {
+ "component": "channel"
+ },
+ "chat": {
+ "component": "chat"
+ },
+ "cloudbuild": {
+ "component": "cloudbuild"
+ },
+ "cloudcontrolspartner": {
+ "component": "cloudcontrolspartner"
+ },
+ "clouddms": {
+ "component": "clouddms"
+ },
+ "cloudprofiler": {
+ "component": "cloudprofiler"
+ },
+ "cloudquotas": {
+ "component": "cloudquotas"
+ },
+ "cloudtasks": {
+ "component": "cloudtasks"
+ },
+ "commerce": {
+ "component": "commerce"
+ },
+ "compute": {
+ "component": "compute"
+ },
+ "compute/metadata": {
+ "component": "compute/metadata"
+ },
+ "confidentialcomputing": {
+ "component": "confidentialcomputing"
+ },
+ "config": {
+ "component": "config"
+ },
+ "contactcenterinsights": {
+ "component": "contactcenterinsights"
+ },
+ "container": {
+ "component": "container"
+ },
+ "containeranalysis": {
+ "component": "containeranalysis"
+ },
+ "datacatalog": {
+ "component": "datacatalog"
+ },
+ "dataflow": {
+ "component": "dataflow"
+ },
+ "dataform": {
+ "component": "dataform"
+ },
+ "datafusion": {
+ "component": "datafusion"
+ },
+ "datalabeling": {
+ "component": "datalabeling"
+ },
+ "dataplex": {
+ "component": "dataplex"
+ },
+ "dataproc": {
+ "component": "dataproc"
+ },
+ "dataqna": {
+ "component": "dataqna"
+ },
+ "datastream": {
+ "component": "datastream"
+ },
+ "deploy": {
+ "component": "deploy"
+ },
+ "developerconnect": {
+ "component": "developerconnect"
+ },
+ "dialogflow": {
+ "component": "dialogflow"
+ },
+ "discoveryengine": {
+ "component": "discoveryengine"
+ },
+ "dlp": {
+ "component": "dlp"
+ },
+ "documentai": {
+ "component": "documentai"
+ },
+ "domains": {
+ "component": "domains"
+ },
+ "edgecontainer": {
+ "component": "edgecontainer"
+ },
+ "edgenetwork": {
+ "component": "edgenetwork"
+ },
+ "essentialcontacts": {
+ "component": "essentialcontacts"
+ },
+ "eventarc": {
+ "component": "eventarc"
+ },
+ "filestore": {
+ "component": "filestore"
+ },
+ "financialservices": {
+ "component": "financialservices"
+ },
+ "functions": {
+ "component": "functions"
+ },
+ "gkebackup": {
+ "component": "gkebackup"
+ },
+ "gkeconnect": {
+ "component": "gkeconnect"
+ },
+ "gkehub": {
+ "component": "gkehub"
+ },
+ "gkemulticloud": {
+ "component": "gkemulticloud"
+ },
+ "grafeas": {
+ "component": "grafeas"
+ },
+ "gsuiteaddons": {
+ "component": "gsuiteaddons"
+ },
+ "iam": {
+ "component": "iam"
+ },
+ "iap": {
+ "component": "iap"
+ },
+ "identitytoolkit": {
+ "component": "identitytoolkit"
+ },
+ "ids": {
+ "component": "ids"
+ },
+ "iot": {
+ "component": "iot"
+ },
+ "kms": {
+ "component": "kms"
+ },
+ "language": {
+ "component": "language"
+ },
+ "lifesciences": {
+ "component": "lifesciences"
+ },
+ "longrunning": {
+ "component": "longrunning"
+ },
+ "managedidentities": {
+ "component": "managedidentities"
+ },
+ "managedkafka": {
+ "component": "managedkafka"
+ },
+ "maps": {
+ "component": "maps"
+ },
+ "mediatranslation": {
+ "component": "mediatranslation"
+ },
+ "memcache": {
+ "component": "memcache"
+ },
+ "memorystore": {
+ "component": "memorystore"
+ },
+ "metastore": {
+ "component": "metastore"
+ },
+ "migrationcenter": {
+ "component": "migrationcenter"
+ },
+ "modelarmor": {
+ "component": "modelarmor"
+ },
+ "monitoring": {
+ "component": "monitoring"
+ },
+ "netapp": {
+ "component": "netapp"
+ },
+ "networkconnectivity": {
+ "component": "networkconnectivity"
+ },
+ "networkmanagement": {
+ "component": "networkmanagement"
+ },
+ "networksecurity": {
+ "component": "networksecurity"
+ },
+ "networkservices": {
+ "component": "networkservices"
+ },
+ "notebooks": {
+ "component": "notebooks"
+ },
+ "optimization": {
+ "component": "optimization"
+ },
+ "oracledatabase": {
+ "component": "oracledatabase"
+ },
+ "orchestration": {
+ "component": "orchestration"
+ },
+ "orgpolicy": {
+ "component": "orgpolicy"
+ },
+ "osconfig": {
+ "component": "osconfig"
+ },
+ "oslogin": {
+ "component": "oslogin"
+ },
+ "parallelstore": {
+ "component": "parallelstore"
+ },
+ "parametermanager": {
+ "component": "parametermanager"
+ },
+ "phishingprotection": {
+ "component": "phishingprotection"
+ },
+ "policysimulator": {
+ "component": "policysimulator"
+ },
+ "policytroubleshooter": {
+ "component": "policytroubleshooter"
+ },
+ "privatecatalog": {
+ "component": "privatecatalog"
+ },
+ "privilegedaccessmanager": {
+ "component": "privilegedaccessmanager"
+ },
+ "rapidmigrationassessment": {
+ "component": "rapidmigrationassessment"
+ },
+ "recaptchaenterprise": {
+ "component": "recaptchaenterprise"
+ },
+ "recommendationengine": {
+ "component": "recommendationengine"
+ },
+ "recommender": {
+ "component": "recommender"
+ },
+ "redis": {
+ "component": "redis"
+ },
+ "resourcemanager": {
+ "component": "resourcemanager"
+ },
+ "retail": {
+ "component": "retail"
+ },
+ "run": {
+ "component": "run"
+ },
+ "scheduler": {
+ "component": "scheduler"
+ },
+ "secretmanager": {
+ "component": "secretmanager"
+ },
+ "securesourcemanager": {
+ "component": "securesourcemanager"
+ },
+ "security": {
+ "component": "security"
+ },
+ "securitycenter": {
+ "component": "securitycenter"
+ },
+ "securitycentermanagement": {
+ "component": "securitycentermanagement"
+ },
+ "securityposture": {
+ "component": "securityposture"
+ },
+ "servicecontrol": {
+ "component": "servicecontrol"
+ },
+ "servicedirectory": {
+ "component": "servicedirectory"
+ },
+ "servicehealth": {
+ "component": "servicehealth"
+ },
+ "servicemanagement": {
+ "component": "servicemanagement"
+ },
+ "serviceusage": {
+ "component": "serviceusage"
+ },
+ "shell": {
+ "component": "shell"
+ },
+ "shopping": {
+ "component": "shopping"
+ },
+ "speech": {
+ "component": "speech"
+ },
+ "storageinsights": {
+ "component": "storageinsights"
+ },
+ "storagetransfer": {
+ "component": "storagetransfer"
+ },
+ "streetview": {
+ "component": "streetview"
+ },
+ "support": {
+ "component": "support"
+ },
+ "talent": {
+ "component": "talent"
+ },
+ "telcoautomation": {
+ "component": "telcoautomation"
+ },
+ "texttospeech": {
+ "component": "texttospeech"
+ },
+ "tpu": {
+ "component": "tpu"
+ },
+ "trace": {
+ "component": "trace"
+ },
+ "translate": {
+ "component": "translate"
+ },
+ "video": {
+ "component": "video"
+ },
+ "videointelligence": {
+ "component": "videointelligence"
+ },
+ "vision": {
+ "component": "vision"
+ },
+ "visionai": {
+ "component": "visionai"
+ },
+ "vmmigration": {
+ "component": "vmmigration"
+ },
+ "vmwareengine": {
+ "component": "vmwareengine"
+ },
+ "vpcaccess": {
+ "component": "vpcaccess"
+ },
+ "webrisk": {
+ "component": "webrisk"
+ },
+ "websecurityscanner": {
+ "component": "websecurityscanner"
+ },
+ "workflows": {
+ "component": "workflows"
+ },
+ "workstations": {
+ "component": "workstations"
+ }
+ },
+ "plugins": [
+ "sentence-case"
+ ]
+}
diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json
new file mode 100644
index 000000000..1400245b8
--- /dev/null
+++ b/vendor/cloud.google.com/go/release-please-config.json
@@ -0,0 +1,11 @@
+{
+ "release-type": "go-yoshi",
+ "separate-pull-requests": true,
+ "include-component-in-tag": false,
+ "packages": {
+ ".": {
+ "component": "main"
+ }
+ },
+ "plugins": ["sentence-case"]
+}
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 8b3fa6fc4..f04efaaa6 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,309 @@
# Changes
+## [1.51.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.50.0...storage/v1.51.0) (2025-03-07)
+
+
+### Features
+
+* **storage/append:** Support appends in w1r3. ([#11483](https://github.com/googleapis/google-cloud-go/issues/11483)) ([48bb391](https://github.com/googleapis/google-cloud-go/commit/48bb39154479a2cf2d379316e0915f39d7b7a518))
+* **storage:** Benchmark with experimental MRD. ([#11501](https://github.com/googleapis/google-cloud-go/issues/11501)) ([7b49152](https://github.com/googleapis/google-cloud-go/commit/7b491520a693d258d3370a19c43c9dff6c8558c7))
+* **storage:** Implement RetryChunkDeadline for grpc writes ([#11476](https://github.com/googleapis/google-cloud-go/issues/11476)) ([03575d7](https://github.com/googleapis/google-cloud-go/commit/03575d74f5241cc714e4d3ac63635569a34f5633))
+* **storage:** Specify benchmark integrity check. ([#11465](https://github.com/googleapis/google-cloud-go/issues/11465)) ([da18845](https://github.com/googleapis/google-cloud-go/commit/da188453e0254c49a01d28788d0849a2d0e98e0c))
+* **storage:** Use ReadHandle for faster re-connect ([#11510](https://github.com/googleapis/google-cloud-go/issues/11510)) ([cac52f7](https://github.com/googleapis/google-cloud-go/commit/cac52f79a73d46774d33d76e3075c0a5b3e0b9f3))
+* **storage:** Wrap NotFound errors for buckets and objects ([#11519](https://github.com/googleapis/google-cloud-go/issues/11519)) ([0dd7d3d](https://github.com/googleapis/google-cloud-go/commit/0dd7d3d62e54c6c3bca395fcca8450ad3347a5a0))
+
+
+### Bug Fixes
+
+* **storage/append:** Report progress for appends. ([#11503](https://github.com/googleapis/google-cloud-go/issues/11503)) ([96dbb6c](https://github.com/googleapis/google-cloud-go/commit/96dbb6c12398fb3cbffab2bf61836bef2f704f66))
+* **storage:** Add a safety check for readhandle ([#11549](https://github.com/googleapis/google-cloud-go/issues/11549)) ([c9edb37](https://github.com/googleapis/google-cloud-go/commit/c9edb379ece70f065650702c9240ee540ca2f610))
+* **storage:** Add universe domain to defaultSignBytesFunc ([#11521](https://github.com/googleapis/google-cloud-go/issues/11521)) ([511608b](https://github.com/googleapis/google-cloud-go/commit/511608b8e8554aa06f9fe2e2e4f51ead0f484031))
+* **storage:** Clone the defaultRetry to avoid modifying it directly ([#11533](https://github.com/googleapis/google-cloud-go/issues/11533)) ([7f8d69d](https://github.com/googleapis/google-cloud-go/commit/7f8d69dcd6a7b1ad6c1df8d9fe8dfb5fe0947479))
+* **storage:** Fix adding multiple range on stream with same read id ([#11584](https://github.com/googleapis/google-cloud-go/issues/11584)) ([0bb3434](https://github.com/googleapis/google-cloud-go/commit/0bb3434e0e12563ff21ef72ad2e52ad7eb61d66e))
+* **storage:** Modify the callback of mrd to return length of data read instead of limit. ([#11687](https://github.com/googleapis/google-cloud-go/issues/11687)) ([9e359f0](https://github.com/googleapis/google-cloud-go/commit/9e359f0089f744c32d12bf77889d69a4db155357))
+* **storage:** Propagate ctx from invoke to grpc upload reqs ([#11475](https://github.com/googleapis/google-cloud-go/issues/11475)) ([9ad9d76](https://github.com/googleapis/google-cloud-go/commit/9ad9d7665ca2f4cfdcee75f5e683084ac49536a6))
+* **storage:** Remove duplicate routing header ([#11534](https://github.com/googleapis/google-cloud-go/issues/11534)) ([8eeb59c](https://github.com/googleapis/google-cloud-go/commit/8eeb59cbfb16d8f379f7aa4c6f11e53cebbd38a6))
+* **storage:** Return sentinel ErrObjectNotExist for copy and compose ([#11369](https://github.com/googleapis/google-cloud-go/issues/11369)) ([74d0c10](https://github.com/googleapis/google-cloud-go/commit/74d0c1096f897ca3c15646f3049ea540bed0a6a0)), refs [#10760](https://github.com/googleapis/google-cloud-go/issues/10760)
+* **storage:** Wait for XML read req to finish to avoid data races ([#11527](https://github.com/googleapis/google-cloud-go/issues/11527)) ([782e12a](https://github.com/googleapis/google-cloud-go/commit/782e12a11c1dfe6d831f5d0b9b5f4409993e4d9e))
+
+## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.49.0...storage/v1.50.0) (2025-01-09)
+
+
+### Features
+
+* **storage/internal:** Add new appendable Object to BidiWrite API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage/internal:** Add new preview BidiReadObject API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage:** Add support for gRPC bi-directional multi-range reads. This API is in private preview and not generally and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Add support for ReadHandle, a gRPC feature that allows for accelerated resumption of streams when one is interrupted. ReadHandle requires the bi-directional read API, which is in private preview and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Support appendable semantics for writes in gRPC. This API is in preview. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Refactor gRPC writer flow ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+
+
+### Bug Fixes
+
+* **storage:** Add mutex around uses of mrd variables ([#11405](https://github.com/googleapis/google-cloud-go/issues/11405)) ([54bfc32](https://github.com/googleapis/google-cloud-go/commit/54bfc32db7a0ff40a493de4d466f21ad624de04e))
+* **storage:** Return the appropriate error for method not supported ([#11416](https://github.com/googleapis/google-cloud-go/issues/11416)) ([56d704e](https://github.com/googleapis/google-cloud-go/commit/56d704e3037840aeb87b22cc83f2b6088c79bcee))
+
+
+### Documentation
+
+* **storage/internal:** Add IAM information to RPC comments for reference documentation ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage:** Add preview comment to NewMultiRangeDownloader ([#11420](https://github.com/googleapis/google-cloud-go/issues/11420)) ([4ec1d66](https://github.com/googleapis/google-cloud-go/commit/4ec1d66ee180e800606568e8693a282645ec7369))
+
+## [1.49.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.48.0...storage/v1.49.0) (2024-12-21)
+
+
+### Features
+
+* **storage/internal:** Add finalize_time field in Object metadata ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
+* **storage/internal:** Add MoveObject RPC ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
+* **storage:** Add ObjectHandle.Move method ([#11302](https://github.com/googleapis/google-cloud-go/issues/11302)) ([a3cb8c4](https://github.com/googleapis/google-cloud-go/commit/a3cb8c4fc48883b54d4e830ae5f5ef4f1a3b8ca3))
+* **storage:** Return file metadata on read ([#11212](https://github.com/googleapis/google-cloud-go/issues/11212)) ([d49263b](https://github.com/googleapis/google-cloud-go/commit/d49263b2ab614cad801e26b4a169eafe08d4a2a0))
+
+
+### Bug Fixes
+
+* **storage/dataflux:** Address deadlock when reading from ranges ([#11303](https://github.com/googleapis/google-cloud-go/issues/11303)) ([32cbf56](https://github.com/googleapis/google-cloud-go/commit/32cbf561590541eb0387787bf729be6ddf68e4ee))
+* **storage:** Disable allow non-default credentials flag ([#11337](https://github.com/googleapis/google-cloud-go/issues/11337)) ([145ddf4](https://github.com/googleapis/google-cloud-go/commit/145ddf4f6123d9561856d2b6adeefdfae462b3f7))
+* **storage:** Monitored resource detection ([#11197](https://github.com/googleapis/google-cloud-go/issues/11197)) ([911bcd8](https://github.com/googleapis/google-cloud-go/commit/911bcd8b1816256482bd52e85da7eaf00c315293))
+* **storage:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.47.0...storage/v1.48.0) (2024-12-05)
+
+
+### Features
+
+* **storage/dataflux:** Run worksteal listing parallel to sequential listing ([#10966](https://github.com/googleapis/google-cloud-go/issues/10966)) ([3005f5a](https://github.com/googleapis/google-cloud-go/commit/3005f5a86c18254e569b8b1782bf014aa62f33cc))
+* **storage:** Add Writer.ChunkTransferTimeout ([#11111](https://github.com/googleapis/google-cloud-go/issues/11111)) ([fd1db20](https://github.com/googleapis/google-cloud-go/commit/fd1db203d0de898891b9920aacb141ea39228609))
+* **storage:** Allow non default service account ([#11137](https://github.com/googleapis/google-cloud-go/issues/11137)) ([19f01c3](https://github.com/googleapis/google-cloud-go/commit/19f01c3c48ed1272c8fc0af9e5f69646cb662808))
+
+
+### Bug Fixes
+
+* **storage:** Add backoff to gRPC write retries ([#11200](https://github.com/googleapis/google-cloud-go/issues/11200)) ([a7db927](https://github.com/googleapis/google-cloud-go/commit/a7db927da9cf4c6cf242a5db83e44a16d75a8291))
+* **storage:** Correct direct connectivity check ([#11152](https://github.com/googleapis/google-cloud-go/issues/11152)) ([a75c8b0](https://github.com/googleapis/google-cloud-go/commit/a75c8b0f72c38d9a85c908715c3e37eb5cffb131))
+* **storage:** Disable soft delete policy using 0 retentionDurationSeconds ([#11226](https://github.com/googleapis/google-cloud-go/issues/11226)) ([f087721](https://github.com/googleapis/google-cloud-go/commit/f087721b7b20ad28ded1d0a84756a8bbaa2bb95a))
+* **storage:** Retry SignBlob call for URL signing ([#11154](https://github.com/googleapis/google-cloud-go/issues/11154)) ([f198452](https://github.com/googleapis/google-cloud-go/commit/f198452fd2b29e779e9080ba79d7e873eb0c32ef))
+
+## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.46.0...storage/v1.47.0) (2024-11-14)
+
+
+### Features
+
+* **storage:** Introduce dp detector based on grpc metrics ([#11100](https://github.com/googleapis/google-cloud-go/issues/11100)) ([60c2323](https://github.com/googleapis/google-cloud-go/commit/60c2323102b623e042fc508e2b1bb830a03f9577))
+
+
+### Bug Fixes
+
+* **storage:** Bump auth dep ([#11135](https://github.com/googleapis/google-cloud-go/issues/11135)) ([9620a51](https://github.com/googleapis/google-cloud-go/commit/9620a51b2c6904d8d93e124494bc297fb98553d2))
+
+## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.45.0...storage/v1.46.0) (2024-10-31)
+
+### Features
+
+* **storage:** Add grpc metrics experimental options ([#10984](https://github.com/googleapis/google-cloud-go/issues/10984)) ([5b7397b](https://github.com/googleapis/google-cloud-go/commit/5b7397b169176f030049e1511859a883422c774e))
+
+
+### Bug Fixes
+
+* **storage:** Skip only specific transport tests. ([#11016](https://github.com/googleapis/google-cloud-go/issues/11016)) ([d40fbff](https://github.com/googleapis/google-cloud-go/commit/d40fbff9c1984aeed0224a4ac93eb95c5af17126))
+* **storage:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **storage:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([2b8ca4b](https://github.com/googleapis/google-cloud-go/commit/2b8ca4b4127ce3025c7a21cc7247510e07cc5625))
+
+
+### Miscellaneous Chores
+
+* **storage/internal:** Remove notification, service account, and hmac RPCS. These API have been migrated to Storage Control and are available via the JSON API. ([#11008](https://github.com/googleapis/google-cloud-go/issues/11008)) ([e0759f4](https://github.com/googleapis/google-cloud-go/commit/e0759f46639b4c542e5b49e4dc81340d8e123370))
+
+## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.44.0...storage/v1.45.0) (2024-10-17)
+
+
+### Features
+
+* **storage/internal:** Adds support for restore token ([70d82fe](https://github.com/googleapis/google-cloud-go/commit/70d82fe93f60f1075298a077ce1616f9ae7e13fe))
+* **storage:** Adding bucket-specific dynamicDelay ([#10987](https://github.com/googleapis/google-cloud-go/issues/10987)) ([a807a7e](https://github.com/googleapis/google-cloud-go/commit/a807a7e7f9fb002374407622c126102c5e61af82))
+* **storage:** Dynamic read request stall timeout ([#10958](https://github.com/googleapis/google-cloud-go/issues/10958)) ([a09f00e](https://github.com/googleapis/google-cloud-go/commit/a09f00eeecac82af98ae769bab284ee58a3a66cb))
+
+
+### Documentation
+
+* **storage:** Remove preview wording from NewGRPCClient ([#11002](https://github.com/googleapis/google-cloud-go/issues/11002)) ([40c3a5b](https://github.com/googleapis/google-cloud-go/commit/40c3a5b9c4cd4db2f1695e180419197b6a03ed7f))
+
+## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03)
+
+
+### Features
+
+* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f))
+* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2))
+* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f))
+* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545))
+* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8))
+* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb))
+* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88))
+* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b))
+* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361)
+* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34))
+* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8))
+* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c))
+* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567)
+* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978))
+* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d))
+* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0))
+* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2))
+* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+
+### Performance Improvements
+
+* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d))
+
+
+### Documentation
+
+* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+
+## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03)
+
+
+### Features
+
+* **storage/transfermanager:** Add DownloadDirectory ([#10430](https://github.com/googleapis/google-cloud-go/issues/10430)) ([0d0e5dd](https://github.com/googleapis/google-cloud-go/commit/0d0e5dd5214769cc2c197991c2ece1303bd600de))
+* **storage/transfermanager:** Automatically shard downloads ([#10379](https://github.com/googleapis/google-cloud-go/issues/10379)) ([05816f9](https://github.com/googleapis/google-cloud-go/commit/05816f9fafd3132c371da37f3a879bb9e8e7e604))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** WaitAndClose waits for Callbacks to finish ([#10504](https://github.com/googleapis/google-cloud-go/issues/10504)) ([0e81002](https://github.com/googleapis/google-cloud-go/commit/0e81002b3a5e560c874d814d28a35a102311d9ef)), refs [#10502](https://github.com/googleapis/google-cloud-go/issues/10502)
+* **storage:** Allow empty soft delete on Create ([#10394](https://github.com/googleapis/google-cloud-go/issues/10394)) ([d8bd2c1](https://github.com/googleapis/google-cloud-go/commit/d8bd2c1ffc4f27503a74ded438d8bfbdd7707c63)), refs [#10380](https://github.com/googleapis/google-cloud-go/issues/10380)
+* **storage:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **storage:** Retry broken pipe error ([#10374](https://github.com/googleapis/google-cloud-go/issues/10374)) ([2f4daa1](https://github.com/googleapis/google-cloud-go/commit/2f4daa11acf9d3f260fa888333090359c4d9198e)), refs [#9178](https://github.com/googleapis/google-cloud-go/issues/9178)
+
+
+### Documentation
+
+* **storage/control:** Remove allowlist note from Folders RPCs ([d6c543c](https://github.com/googleapis/google-cloud-go/commit/d6c543c3969016c63e158a862fc173dff60fb8d9))
+
+## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10)
+
+
+### Features
+
+* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. ([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71))
+* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146)
+* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05))
+
+
+### Bug Fixes
+
+* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf))
+
+
+### Documentation
+
+* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd))
+
+## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13)
+
+
+### Features
+
+* **storage/control:** Make Managed Folders operations public ([264a6dc](https://github.com/googleapis/google-cloud-go/commit/264a6dcddbffaec987dce1dc00f6550c263d2df7))
+* **storage:** Support for soft delete policies and restore ([#9520](https://github.com/googleapis/google-cloud-go/issues/9520)) ([985deb2](https://github.com/googleapis/google-cloud-go/commit/985deb2bdd1c79944cdd960bd3fbfa38cbfa1c91))
+
+
+### Bug Fixes
+
+* **storage/control:** An existing resource pattern value `projects/{project}/buckets/{bucket}/managedFolders/{managedFolder=**}` to resource definition `storage.googleapis.com/ManagedFolder` is removed ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289))
+* **storage:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90))
+* **storage:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+* **storage:** Disable gax retries for gRPC ([#9747](https://github.com/googleapis/google-cloud-go/issues/9747)) ([bbfc0ac](https://github.com/googleapis/google-cloud-go/commit/bbfc0acc272f21bf1f558ea23648183d5a11cda5))
+* **storage:** More strongly match regex ([#9706](https://github.com/googleapis/google-cloud-go/issues/9706)) ([3cfc8eb](https://github.com/googleapis/google-cloud-go/commit/3cfc8eb418e064d734bf3d8708162062dbbe988f)), refs [#9705](https://github.com/googleapis/google-cloud-go/issues/9705)
+* **storage:** Retry net.OpError on connection reset ([#10154](https://github.com/googleapis/google-cloud-go/issues/10154)) ([54fab10](https://github.com/googleapis/google-cloud-go/commit/54fab107f98b4f79c9df2959a05b981be0a613c1)), refs [#9478](https://github.com/googleapis/google-cloud-go/issues/9478)
+* **storage:** Wrap error when MaxAttempts is hit ([#9767](https://github.com/googleapis/google-cloud-go/issues/9767)) ([9cb262b](https://github.com/googleapis/google-cloud-go/commit/9cb262bb65a162665bfb8bed0022615131bae1f2)), refs [#9720](https://github.com/googleapis/google-cloud-go/issues/9720)
+
+
+### Documentation
+
+* **storage/control:** Update storage control documentation and add PHP for publishing ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c))
+
+## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29)
+
+
+### Features
+
+* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3))
+* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330))
+
+
+### Bug Fixes
+
+* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9))
+* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+
+### Performance Improvements
+
+* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca))
+
+## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11)
+
+
+### Bug Fixes
+
+* **storage:** Add object validation case and test ([#9521](https://github.com/googleapis/google-cloud-go/issues/9521)) ([386bef3](https://github.com/googleapis/google-cloud-go/commit/386bef319b4678beaa926ddfe4edef190f11b68d))
+
+## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.38.0...storage/v1.39.0) (2024-02-29)
+
+
+### Features
+
+* **storage:** Make it possible to disable Content-Type sniffing ([#9431](https://github.com/googleapis/google-cloud-go/issues/9431)) ([0676670](https://github.com/googleapis/google-cloud-go/commit/067667058c06689b64401be11858d84441584039))
+
+## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.37.0...storage/v1.38.0) (2024-02-12)
+
+
+### Features
+
+* **storage:** Support auto-detection of access ID for external_account creds ([#9208](https://github.com/googleapis/google-cloud-go/issues/9208)) ([b958d44](https://github.com/googleapis/google-cloud-go/commit/b958d44589f2b6b226ea3bef23829ac75a0aa6a6))
+* **storage:** Support custom hostname for VirtualHostedStyle SignedURLs ([#9348](https://github.com/googleapis/google-cloud-go/issues/9348)) ([7eec40e](https://github.com/googleapis/google-cloud-go/commit/7eec40e4cf82c53e5bf02bd2c14e0b25043da6d0))
+* **storage:** Support universe domains ([#9344](https://github.com/googleapis/google-cloud-go/issues/9344)) ([29a7498](https://github.com/googleapis/google-cloud-go/commit/29a7498b8eb0d00fdb5acd7ee8ce0e5a2a8c11ce))
+
+
+### Bug Fixes
+
+* **storage:** Fix v4 url signing for hosts that specify ports ([#9347](https://github.com/googleapis/google-cloud-go/issues/9347)) ([f127b46](https://github.com/googleapis/google-cloud-go/commit/f127b4648f861c1ba44f41a280a62652620c04c2))
+
+
+### Documentation
+
+* **storage:** Indicate that gRPC is incompatible with universe domains ([#9386](https://github.com/googleapis/google-cloud-go/issues/9386)) ([e8bd85b](https://github.com/googleapis/google-cloud-go/commit/e8bd85bbce12d5f7ab87fa49d166a6a0d84bd12d))
+
+## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24)
+
+
+### Features
+
+* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c))
+* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51))
+
+
+### Bug Fixes
+
+* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189)
+
## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14)
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
index 74799e55e..a894db605 100644
--- a/vendor/cloud.google.com/go/storage/acl.go
+++ b/vendor/cloud.google.com/go/storage/acl.go
@@ -16,10 +16,7 @@ package storage
import (
"context"
- "net/http"
- "reflect"
- "cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
@@ -79,8 +76,8 @@ type ACLHandle struct {
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "ACL.Delete")
+ defer func() { endSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
@@ -93,8 +90,8 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "ACL.Set")
+ defer func() { endSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
@@ -107,8 +104,8 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (er
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "ACL.List")
+ defer func() { endSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
@@ -162,15 +159,6 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
}
-func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
- vc := reflect.ValueOf(call)
- vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
- if a.userProject != "" {
- vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
- }
- setClientHeader(call.Header())
-}
-
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index 1059d4e8b..6c14b3a18 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -26,7 +26,6 @@ import (
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/internal/optional"
- "cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/api/googleapi"
"google.golang.org/api/iamcredentials/v1"
@@ -82,8 +81,8 @@ func (c *Client) Bucket(name string) *BucketHandle {
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.Create")
+ defer func() { endSpan(ctx, err) }()
o := makeStorageOpts(true, b.retry, b.userProject)
@@ -95,8 +94,8 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.Delete")
+ defer func() { endSpan(ctx, err) }()
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
@@ -116,6 +115,11 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
return &b.defaultObjectACL
}
+// BucketName returns the name of the bucket.
+func (b *BucketHandle) BucketName() string {
+ return b.name
+}
+
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations such as fetching the object or verifying its existence.
// Use methods on ObjectHandle to perform network operations.
@@ -145,8 +149,8 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.Attrs")
+ defer func() { endSpan(ctx, err) }()
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
@@ -154,8 +158,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
// Update updates a bucket's attributes.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.Update")
+ defer func() { endSpan(ctx, err) }()
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
@@ -275,18 +279,24 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
err := json.Unmarshal(b.c.creds.JSON, &sa)
if err != nil {
returnErr = err
- } else if sa.CredType == "impersonated_service_account" {
- start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
-
- if end <= start {
- returnErr = errors.New("error parsing impersonated service account credentials")
- } else {
- return sa.SAImpersonationURL[start+1 : end], nil
- }
- } else if sa.CredType == "service_account" && sa.ClientEmail != "" {
- return sa.ClientEmail, nil
} else {
- returnErr = errors.New("unable to parse credentials; only service_account and impersonated_service_account credentials are supported")
+ switch sa.CredType {
+ case "impersonated_service_account", "external_account":
+ start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")
+
+ if end <= start {
+ returnErr = errors.New("error parsing external or impersonated service account credentials")
+ } else {
+ return sa.SAImpersonationURL[start+1 : end], nil
+ }
+ case "service_account":
+ if sa.ClientEmail != "" {
+ return sa.ClientEmail, nil
+ }
+ returnErr = errors.New("empty service account client email")
+ default:
+ returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported")
+ }
}
}
@@ -302,24 +312,37 @@ func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
}
}
- return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_[BucketHandle.SignedURL]_and_[BucketHandle.GenerateSignedPostPolicyV4])", returnErr)
+ return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr)
}
func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
return func(in []byte) ([]byte, error) {
ctx := context.Background()
+ opts := []option.ClientOption{option.WithHTTPClient(b.c.hc)}
+
+ if b.c.creds != nil {
+ universeDomain, err := b.c.creds.GetUniverseDomain()
+ if err != nil {
+ return nil, err
+ }
+ opts = append(opts, option.WithUniverseDomain(universeDomain))
+ }
+
// It's ok to recreate this service per call since we pass in the http client,
// circumventing the cost of recreating the auth/transport layer
- svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
+ svc, err := iamcredentials.NewService(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
}
-
- resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
- Payload: base64.StdEncoding.EncodeToString(in),
- }).Do()
- if err != nil {
+ // Do the SignBlob call with a retry for transient errors.
+ var resp *iamcredentials.SignBlobResponse
+ if err := run(ctx, func(ctx context.Context) error {
+ resp, err = svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
+ Payload: base64.StdEncoding.EncodeToString(in),
+ }).Do()
+ return err
+ }, b.retry, true); err != nil {
return nil, fmt.Errorf("unable to sign bytes: %w", err)
}
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
@@ -405,6 +428,10 @@ type BucketAttrs struct {
// This field is read-only.
Created time.Time
+ // Updated is the time at which the bucket was last modified.
+ // This field is read-only.
+ Updated time.Time
+
// VersioningEnabled reports whether this bucket has versioning enabled.
VersioningEnabled bool
@@ -473,6 +500,20 @@ type BucketAttrs struct {
// cannot be modified once the bucket is created.
// ObjectRetention cannot be configured or reported through the gRPC API.
ObjectRetentionMode string
+
+ // SoftDeletePolicy contains the bucket's soft delete policy, which defines
+ // the period of time that soft-deleted objects will be retained, and cannot
+ // be permanently deleted. By default, new buckets will be created with a
+ // 7 day retention duration. In order to fully disable soft delete, you need
+ // to set a policy with a RetentionDuration of 0.
+ SoftDeletePolicy *SoftDeletePolicy
+
+ // HierarchicalNamespace contains the bucket's hierarchical namespace
+ // configuration. Hierarchical namespace enabled buckets can contain
+ // [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+ // It cannot be modified after bucket creation time.
+ // UniformBucketLevelAccess must also also be enabled on the bucket.
+ HierarchicalNamespace *HierarchicalNamespace
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -754,12 +795,35 @@ type Autoclass struct {
// TerminalStorageClass: The storage class that objects in the bucket
// eventually transition to if they are not read for a certain length of
// time. Valid values are NEARLINE and ARCHIVE.
+ // To modify TerminalStorageClass, Enabled must be set to true.
TerminalStorageClass string
// TerminalStorageClassUpdateTime represents the time of the most recent
// update to "TerminalStorageClass".
TerminalStorageClassUpdateTime time.Time
}
+// SoftDeletePolicy contains the bucket's soft delete policy, which defines the
+// period of time that soft-deleted objects will be retained, and cannot be
+// permanently deleted.
+type SoftDeletePolicy struct {
+ // EffectiveTime indicates the time from which the policy, or one with a
+ // greater retention, was effective. This field is read-only.
+ EffectiveTime time.Time
+
+ // RetentionDuration is the amount of time that soft-deleted objects in the
+ // bucket will be retained and cannot be permanently deleted.
+ RetentionDuration time.Duration
+}
+
+// HierarchicalNamespace contains the bucket's hierarchical namespace
+// configuration. Hierarchical namespace enabled buckets can contain
+// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+type HierarchicalNamespace struct {
+ // Enabled indicates whether hierarchical namespace features are enabled on
+ // the bucket. This can only be set at bucket creation time currently.
+ Enabled bool
+}
+
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@@ -776,6 +840,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
+ Updated: convertTime(b.Updated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
@@ -797,6 +862,8 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
RPO: toRPO(b),
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
Autoclass: toAutoclassFromRaw(b.Autoclass),
+ SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace),
}, nil
}
@@ -811,6 +878,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
StorageClass: b.GetStorageClass(),
Created: b.GetCreateTime().AsTime(),
+ Updated: b.GetUpdateTime().AsTime(),
VersioningEnabled: b.GetVersioning().GetEnabled(),
ACL: toBucketACLRulesFromProto(b.GetAcl()),
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
@@ -830,6 +898,8 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
+ SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace),
}
}
@@ -885,6 +955,8 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
Autoclass: b.Autoclass.toRawAutoclass(),
+ SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(),
}
}
@@ -945,6 +1017,8 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
Rpo: b.RPO.String(),
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
Autoclass: b.Autoclass.toProtoAutoclass(),
+ SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(),
}
}
@@ -1026,6 +1100,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
IamConfig: bktIAM,
Rpo: ua.RPO.String(),
Autoclass: ua.Autoclass.toProtoAutoclass(),
+ SoftDeletePolicy: ua.SoftDeletePolicy.toProtoSoftDeletePolicy(),
Labels: ua.setLabels,
}
}
@@ -1143,9 +1218,15 @@ type BucketAttrsToUpdate struct {
RPO RPO
// If set, updates the autoclass configuration of the bucket.
+ // To disable autoclass on the bucket, set to an empty &Autoclass{}.
+ // To update the configuration for Autoclass.TerminalStorageClass,
+ // Autoclass.Enabled must also be set to true.
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
Autoclass *Autoclass
+ // If set, updates the soft delete policy of the bucket.
+ SoftDeletePolicy *SoftDeletePolicy
+
// acl is the list of access control rules on the bucket.
// It is unexported and only used internally by the gRPC client.
// Library users should use ACLHandle methods directly.
@@ -1267,6 +1348,16 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
}
rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass")
}
+ if ua.SoftDeletePolicy != nil {
+ if ua.SoftDeletePolicy.RetentionDuration == 0 {
+ rb.SoftDeletePolicy = &raw.BucketSoftDeletePolicy{
+ RetentionDurationSeconds: 0,
+ ForceSendFields: []string{"RetentionDurationSeconds"},
+ }
+ } else {
+ rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy()
+ }
+ }
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
@@ -2047,6 +2138,92 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
}
}
+func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy {
+ if p == nil {
+ return nil
+ }
+ // Excluding read only field EffectiveTime.
+ // ForceSendFields must be set to send a zero value for RetentionDuration and disable
+ // soft delete.
+ return &raw.BucketSoftDeletePolicy{
+ RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()),
+ ForceSendFields: []string{"RetentionDurationSeconds"},
+ }
+}
+
+func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy {
+ if p == nil {
+ return nil
+ }
+ // Excluding read only field EffectiveTime.
+ return &storagepb.Bucket_SoftDeletePolicy{
+ RetentionDuration: durationpb.New(p.RetentionDuration),
+ }
+}
+
+func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy {
+ if p == nil {
+ return nil
+ }
+
+ policy := &SoftDeletePolicy{
+ RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second,
+ }
+
+ // Return EffectiveTime only if parsed to a valid value.
+ if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil {
+ policy.EffectiveTime = t
+ }
+
+ return policy
+}
+
+func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy {
+ if p == nil {
+ return nil
+ }
+ return &SoftDeletePolicy{
+ EffectiveTime: p.GetEffectiveTime().AsTime(),
+ RetentionDuration: p.GetRetentionDuration().AsDuration(),
+ }
+}
+
+func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &storagepb.Bucket_HierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &raw.BucketHierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace {
+ if p == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: p.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace {
+ if r == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: r.Enabled,
+ }
+}
+
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index 4906b1d1f..7ce762d5a 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -59,8 +59,10 @@ type storageClient interface {
// Object metadata methods.
DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
- GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
+ GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
+ RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
+ MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error)
// Default Object ACL methods.
@@ -106,6 +108,8 @@ type storageClient interface {
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
+
+ NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error)
}
// settings contains transport-agnostic configuration for API calls made via
@@ -121,7 +125,7 @@ type settings struct {
gax []gax.CallOption
// idempotent indicates if the call is idempotent or not when considering
- // if the call should be retired or not.
+ // if the call should be retried or not.
idempotent bool
// clientOption is a set of option.ClientOption to be used during client
@@ -131,6 +135,8 @@ type settings struct {
// userProject is the user project that should be billed for the request.
userProject string
+
+ metricsContext *metricsContext
}
func initSettings(opts ...storageOption) *settings {
@@ -182,16 +188,6 @@ type storageOption interface {
Apply(s *settings)
}
-func withGAXOptions(opts ...gax.CallOption) storageOption {
- return &gaxOption{opts}
-}
-
-type gaxOption struct {
- opts []gax.CallOption
-}
-
-func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }
-
func withRetryConfig(rc *retryConfig) storageOption {
return &retryOption{rc}
}
@@ -244,7 +240,8 @@ type openWriterParams struct {
chunkSize int
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
// Optional.
- chunkRetryDeadline time.Duration
+ chunkRetryDeadline time.Duration
+ chunkTransferTimeout time.Duration
// Object/request properties
@@ -254,6 +251,9 @@ type openWriterParams struct {
// attrs - see `Writer.ObjectAttrs`.
// Required.
attrs *ObjectAttrs
+ // forceEmptyContentType - Disables auto-detect of Content-Type
+ // Optional.
+ forceEmptyContentType bool
// conds - see `Writer.o.conds`.
// Optional.
conds *Conditions
@@ -263,6 +263,9 @@ type openWriterParams struct {
// sendCRC32C - see `Writer.SendCRC32C`.
// Optional.
sendCRC32C bool
+ // append - Write with appendable object semantics.
+ // Optional.
+ append bool
// Writer callbacks
@@ -278,6 +281,20 @@ type openWriterParams struct {
// setObj callback for reporting the resulting object - see `Writer.obj`.
// Required.
setObj func(*ObjectAttrs)
+ // setFlush callback for providing a Flush function implementation - see `Writer.Flush`.
+ // Required.
+ setFlush func(func() (int64, error))
+ // setPipeWriter callback for reseting `Writer.pw` if needed.
+ setPipeWriter func(*io.PipeWriter)
+}
+
+type newMultiRangeDownloaderParams struct {
+ bucket string
+ conds *Conditions
+ encryptionKey []byte
+ gen int64
+ object string
+ handle *ReadHandle
}
type newRangeReaderParams struct {
@@ -289,6 +306,15 @@ type newRangeReaderParams struct {
object string
offset int64
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
+ handle *ReadHandle
+}
+
+type getObjectParams struct {
+ bucket, object string
+ gen int64
+ encryptionKey []byte
+ conds *Conditions
+ softDeleted bool
}
type updateObjectParams struct {
@@ -300,6 +326,21 @@ type updateObjectParams struct {
overrideRetention *bool
}
+type restoreObjectParams struct {
+ bucket, object string
+ gen int64
+ encryptionKey []byte
+ conds *Conditions
+ copySourceACL bool
+}
+
+type moveObjectParams struct {
+ bucket, srcObject, dstObject string
+ srcConds *Conditions
+ dstConds *Conditions
+ encryptionKey []byte
+}
+
type composeObjectRequest struct {
dstBucket string
dstObject destinationObject
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index 22adb744f..4fcfb7326 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -331,13 +331,14 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)
-# Experimental gRPC API
+# gRPC API
-This package includes support for the Cloud Storage gRPC API, which is currently
-in preview. This implementation uses gRPC rather than the current JSON & XML
-APIs to make requests to Cloud Storage. If you would like to try the API,
-please contact your GCP account rep for more information. The gRPC API is not
-yet generally available, so it may be subject to breaking changes.
+This package includes support for the Cloud Storage gRPC API. The
+implementation uses gRPC rather than the Default
+JSON & XML APIs to make requests to Cloud Storage.
+The Go Storage gRPC client is generally available.
+The Notifications, Serivce Account HMAC
+and GetServiceAccount RPCs are not supported through the gRPC client.
To create a client which will use gRPC, use the alternate constructor:
@@ -348,16 +349,56 @@ To create a client which will use gRPC, use the alternate constructor:
}
// Use client as usual.
-If the application is running within GCP, users may get better performance by
-enabling DirectPath (enabling requests to skip some proxy steps). To enable,
-set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
-the following side-effect imports to your application:
-
- import (
- _ "google.golang.org/grpc/balancer/rls"
- _ "google.golang.org/grpc/xds/googledirectpath"
- )
-
+Using the gRPC API inside GCP with a bucket in the same region can allow for
+[Direct Connectivity] (enabling requests to skip some proxy steps and reducing
+response latency). A warning is emmitted if gRPC is not used within GCP to
+warn that Direct Connectivity could not be initialized. Direct Connectivity
+is not required to access the gRPC API.
+
+Dependencies for the gRPC API may slightly increase the size of binaries for
+applications depending on this package. If you are not using gRPC, you can use
+the build tag `disable_grpc_modules` to opt out of these dependencies and
+reduce the binary size.
+
+The gRPC client emits metrics by default and will export the
+gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+[Google Cloud Monitoring]. The metrics are accessible through Cloud Monitoring
+API and you incur no additional cost for publishing the metrics. Google Cloud
+Support can use this information to more quickly diagnose problems related to
+GCS and gRPC.
+Sending this data does not incur any billing charges, and requires minimal
+CPU (a single RPC every minute) or memory (a few KiB to batch the
+telemetry).
+
+To access the metrics you can view them through Cloud Monitoring
+[metric explorer] with the prefix `storage.googleapis.com/client`. Metrics are emitted
+every minute.
+
+You can disable metrics using the following example when creating a new gRPC
+client using [WithDisabledClientMetrics].
+
+The metrics exporter uses Cloud Monitoring API which determines
+project ID and credentials doing the following:
+
+* Project ID is determined using OTel Resource Detector for the environment
+otherwise it falls back to the project provided by [google.FindCredentials].
+
+* Credentials are determined using [Application Default Credentials]. The
+principal must have `roles/monitoring.metricWriter` role granted. If not a
+logged warning will be emitted. Subsequent are silenced to prevent noisy logs.
+
+# Storage Control API
+
+Certain control plane and long-running operations for Cloud Storage (including Folder
+and Managed Folder operations) are supported via the autogenerated Storage Control
+client, which is available as a subpackage in this module. See package docs at
+[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
+
+[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials
+[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials
+[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
@@ -366,5 +407,8 @@ the following side-effect imports to your application:
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
+[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
+[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer
+[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity
*/
package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go
new file mode 100644
index 000000000..5944f515d
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go
@@ -0,0 +1,237 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+)
+
+// dynamicDelay dynamically calculates the delay at a fixed percentile, based on
+// delay samples.
+//
+// dynamicDelay is goroutine-safe.
+type dynamicDelay struct {
+ increaseFactor float64
+ decreaseFactor float64
+ minDelay time.Duration
+ maxDelay time.Duration
+ value time.Duration
+
+ // Guards the value
+ mu *sync.RWMutex
+}
+
+// validateDynamicDelayParams ensures,
+// targetPercentile is a valid fraction (between 0 and 1).
+// increaseRate is a positive number.
+// minDelay is less than maxDelay.
+func validateDynamicDelayParams(targetPercentile, increaseRate float64, minDelay, maxDelay time.Duration) error {
+ if targetPercentile < 0 || targetPercentile > 1 {
+ return fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile)
+ }
+ if increaseRate <= 0 {
+ return fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate)
+ }
+ if minDelay >= maxDelay {
+ return fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay)
+ }
+ return nil
+}
+
+// NewDynamicDelay returns a dynamicDelay.
+//
+// targetPercentile is the desired percentile to be computed. For example, a
+// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be
+// in the range [0, 1].
+//
+// increaseRate (must be > 0) determines how many increase calls it takes for
+// Value to double.
+//
+// initialDelay is the start value of the delay.
+//
+// decrease can never lower the delay past minDelay, increase can never raise
+// the delay past maxDelay.
+func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) *dynamicDelay {
+ if initialDelay < minDelay {
+ initialDelay = minDelay
+ }
+ if initialDelay > maxDelay {
+ initialDelay = maxDelay
+ }
+
+ // Compute increaseFactor and decreaseFactor such that:
+ // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1
+ increaseFactor := math.Exp(math.Log(2) / increaseRate)
+ if increaseFactor < 1.001 {
+ increaseFactor = 1.001
+ }
+ decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile)
+ if decreaseFactor > 0.9999 {
+ decreaseFactor = 0.9999
+ }
+
+ return &dynamicDelay{
+ increaseFactor: increaseFactor,
+ decreaseFactor: decreaseFactor,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ value: initialDelay,
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (d *dynamicDelay) unsafeIncrease() {
+ v := time.Duration(float64(d.value) * d.increaseFactor)
+ if v > d.maxDelay {
+ d.value = d.maxDelay
+ } else {
+ d.value = v
+ }
+}
+
+// increase notes that the operation took longer than the delay returned by Value.
+func (d *dynamicDelay) increase() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeIncrease()
+}
+
+func (d *dynamicDelay) unsafeDecrease() {
+ v := time.Duration(float64(d.value) * d.decreaseFactor)
+ if v < d.minDelay {
+ d.value = d.minDelay
+ } else {
+ d.value = v
+ }
+}
+
+// decrease notes that the operation completed before the delay returned by getValue.
+func (d *dynamicDelay) decrease() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeDecrease()
+}
+
+// update updates the delay value depending on the specified latency.
+func (d *dynamicDelay) update(latency time.Duration) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if latency > d.value {
+ d.unsafeIncrease()
+ } else {
+ d.unsafeDecrease()
+ }
+}
+
+// getValue returns the desired delay to wait before retry the operation.
+func (d *dynamicDelay) getValue() time.Duration {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ return d.value
+}
+
+// printDelay prints the state of delay, helpful in debugging.
+func (d *dynamicDelay) printDelay() {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ fmt.Println("IncreaseFactor: ", d.increaseFactor)
+ fmt.Println("DecreaseFactor: ", d.decreaseFactor)
+ fmt.Println("MinDelay: ", d.minDelay)
+ fmt.Println("MaxDelay: ", d.maxDelay)
+ fmt.Println("Value: ", d.value)
+}
+
+// bucketDelayManager wraps dynamicDelay to provide bucket-specific delays.
+type bucketDelayManager struct {
+ targetPercentile float64
+ increaseRate float64
+ initialDelay time.Duration
+ minDelay time.Duration
+ maxDelay time.Duration
+
+ // delays maps bucket names to their dynamic delay instance.
+ delays map[string]*dynamicDelay
+
+ // mu guards delays.
+ mu *sync.RWMutex
+}
+
+// newBucketDelayManager returns a new bucketDelayManager instance.
+func newBucketDelayManager(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*bucketDelayManager, error) {
+ err := validateDynamicDelayParams(targetPercentile, increaseRate, minDelay, maxDelay)
+ if err != nil {
+ return nil, err
+ }
+
+ return &bucketDelayManager{
+ targetPercentile: targetPercentile,
+ increaseRate: increaseRate,
+ initialDelay: initialDelay,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ delays: make(map[string]*dynamicDelay),
+ mu: &sync.RWMutex{},
+ }, nil
+}
+
+// getDelay retrieves the dynamicDelay instance for the given bucket name. If no delay
+// exists for the bucket, a new one is created with the configured parameters.
+func (b *bucketDelayManager) getDelay(bucketName string) *dynamicDelay {
+ b.mu.RLock()
+ delay, ok := b.delays[bucketName]
+ b.mu.RUnlock()
+
+ if !ok {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Check again, as someone might create b/w the execution of mu.RUnlock() and mu.Lock().
+ delay, ok = b.delays[bucketName]
+ if !ok {
+ // Create a new dynamicDelay for the bucket if it doesn't exist
+ delay = newDynamicDelay(b.targetPercentile, b.increaseRate, b.initialDelay, b.minDelay, b.maxDelay)
+ b.delays[bucketName] = delay
+ }
+ }
+ return delay
+}
+
+// increase notes that the operation took longer than the delay for the given bucket.
+func (b *bucketDelayManager) increase(bucketName string) {
+ b.getDelay(bucketName).increase()
+}
+
+// decrease notes that the operation completed before the delay for the given bucket.
+func (b *bucketDelayManager) decrease(bucketName string) {
+ b.getDelay(bucketName).decrease()
+}
+
+// update updates the delay value for the bucket depending on the specified latency.
+func (b *bucketDelayManager) update(bucketName string, latency time.Duration) {
+ b.getDelay(bucketName).update(latency)
+}
+
+// getValue returns the desired delay to wait before retrying the operation for the given bucket.
+func (b *bucketDelayManager) getValue(bucketName string) time.Duration {
+ return b.getDelay(bucketName).getValue()
+}
diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh
index 7bad7cf39..cf26432e0 100644
--- a/vendor/cloud.google.com/go/storage/emulator_test.sh
+++ b/vendor/cloud.google.com/go/storage/emulator_test.sh
@@ -89,4 +89,4 @@ then
fi
# Run tests
-go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
+go test -v -timeout 15m ./ ./dataflux -run="^Test(RetryConformance|.*Emulated)$" -short -race 2>&1 | tee -a sponge_log.log
diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go
new file mode 100644
index 000000000..5bcc59ad2
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package experimental is a collection of experimental features that might
+// have some rough edges to them. Housing experimental features in this package
+// results in a user accessing these APIs as `experimental.Foo`, thereby making
+// it explicit that the feature is experimental and using them in production
+// code is at their own risk.
+//
+// All APIs in this package are experimental.
+package experimental
+
+import (
+ "time"
+
+ "cloud.google.com/go/storage/internal"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "google.golang.org/api/option"
+)
+
+// WithMetricInterval provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
+// It sets how often to emit metrics [metric.WithInterval] when using
+// [metric.NewPeriodicReader]
+// When using Cloud Monitoring interval must be at minimum 1 [time.Minute].
+func WithMetricInterval(metricInterval time.Duration) option.ClientOption {
+ return internal.WithMetricInterval.(func(time.Duration) option.ClientOption)(metricInterval)
+}
+
+// WithMetricExporter provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
+// Set an alternate client-side metric Exporter to emit metrics through.
+// Must implement [metric.Exporter]
+func WithMetricExporter(ex *metric.Exporter) option.ClientOption {
+ return internal.WithMetricExporter.(func(*metric.Exporter) option.ClientOption)(ex)
+}
+
+// WithReadStallTimeout provides a [option.ClientOption] that may be passed to [storage.NewClient].
+// It enables the client to retry stalled requests when starting a download from
+// Cloud Storage. If the timeout elapses with no response from the server, the request
+// is automatically retried.
+// The timeout is initially set to ReadStallTimeoutConfig.Min. The client tracks
+// latency across all read requests from the client for each bucket accessed, and can
+// adjust the timeout higher to the target percentile when latency for request to that
+// bucket is high.
+// Currently, this is supported only for downloads ([storage.NewReader] and
+// [storage.NewRangeReader] calls) and only for the XML API. Other read APIs (gRPC & JSON)
+// will be supported soon.
+func WithReadStallTimeout(rstc *ReadStallTimeoutConfig) option.ClientOption {
+ return internal.WithReadStallTimeout.(func(config *ReadStallTimeoutConfig) option.ClientOption)(rstc)
+}
+
+// ReadStallTimeoutConfig defines the timeout which is adjusted dynamically based on
+// past observed latencies.
+type ReadStallTimeoutConfig struct {
+ // Min is the minimum duration of the timeout. The default value is 500ms. Requests
+ // taking shorter than this value to return response headers will never time out.
+ // In general, you should choose a Min value that is greater than the typical value
+ // for the target percentile.
+ Min time.Duration
+
+ // TargetPercentile is the percentile to target for the dynamic timeout. The default
+ // value is 0.99. At the default percentile, at most 1% of requests will be timed out
+ // and retried.
+ TargetPercentile float64
+}
+
+// WithGRPCBidiReads provides an [option.ClientOption] that may be passed to
+// [cloud.google.com/go/storage.NewGRPCClient].
+// It enables the client to use bi-directional gRPC APIs for downloads rather than the
+// server streaming API. In particular, it allows users to use the [storage.MultiRangeDownloader]
+// surface, which requires bi-directional streaming.
+//
+// The bi-directional API is in private preview; please contact your account manager if
+// interested.
+func WithGRPCBidiReads() option.ClientOption {
+ return internal.WithGRPCBidiReads.(func() option.ClientOption)()
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index a51cf9c08..e3f697509 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -16,12 +16,15 @@ package storage
import (
"context"
- "encoding/base64"
+ "encoding/binary"
"errors"
"fmt"
+ "hash/crc32"
"io"
+ "log"
"net/url"
"os"
+ "sync"
"cloud.google.com/go/iam/apiv1/iampb"
"cloud.google.com/go/internal/trace"
@@ -32,10 +35,15 @@ import (
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
+ "google.golang.org/api/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)
@@ -91,10 +99,13 @@ func defaultGRPCOptions() []option.ClientOption {
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
+ WithDisabledClientMetrics(),
)
} else {
// Only enable DirectPath when the emulator is not being targeted.
- defaults = append(defaults, internaloption.EnableDirectPath(true))
+ defaults = append(defaults,
+ internaloption.EnableDirectPath(true),
+ internaloption.EnableDirectPathXds())
}
return defaults
@@ -105,6 +116,25 @@ func defaultGRPCOptions() []option.ClientOption {
type grpcStorageClient struct {
raw *gapic.Client
settings *settings
+ config *storageConfig
+}
+
+func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) {
+ var project string
+ // TODO: use new auth client
+ c, err := transport.Creds(ctx, s.clientOption...)
+ if err == nil {
+ project = c.ProjectID
+ }
+ metricsContext, err := newGRPCMetricContext(ctx, metricsConfig{
+ project: project,
+ interval: config.metricInterval,
+ manualReader: config.manualReader},
+ )
+ if err != nil {
+ return nil, fmt.Errorf("gRPC Metrics: %w", err)
+ }
+ return metricsContext, nil
}
// newGRPCStorageClient initializes a new storageClient that uses the gRPC
@@ -112,12 +142,23 @@ type grpcStorageClient struct {
func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) {
s := initSettings(opts...)
s.clientOption = append(defaultGRPCOptions(), s.clientOption...)
+ // Disable all gax-level retries in favor of retry logic in the veneer client.
+ s.gax = append(s.gax, gax.WithRetry(nil))
config := newStorageConfig(s.clientOption...)
if config.readAPIWasSet {
return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads")
}
+ if !config.disableClientMetrics {
+ // Do not fail client creation if enabling metrics fails.
+ if metricsContext, err := enableClientMetrics(ctx, s, config); err == nil {
+ s.metricsContext = metricsContext
+ s.clientOption = append(s.clientOption, metricsContext.clientOpts...)
+ } else {
+ log.Printf("Failed to enable client metrics: %v", err)
+ }
+ }
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
@@ -126,30 +167,22 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return &grpcStorageClient{
raw: g,
settings: s,
+ config: &config,
}, nil
}
func (c *grpcStorageClient) Close() error {
+ if c.settings.metricsContext != nil {
+ c.settings.metricsContext.close()
+ }
return c.raw.Close()
}
// Top-level methods.
+// GetServiceAccount is not supported in the gRPC client.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetServiceAccountRequest{
- Project: toProjectResource(project),
- }
- var resp *storagepb.ServiceAccount
- err := run(ctx, func(ctx context.Context) error {
- var err error
- resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- return resp.EmailAddress, err
+ return "", errMethodNotSupported
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
@@ -272,17 +305,11 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds
var battrs *BucketAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.GetBucket(ctx, req, s.gax...)
-
battrs = newBucketFromProto(res)
-
return err
}, s.retry, s.idempotent)
- if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
- return nil, ErrBucketNotExist
- }
-
- return battrs, err
+ return battrs, formatBucketError(err)
}
func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
@@ -361,6 +388,9 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
if uattrs.Autoclass != nil {
fieldMask.Paths = append(fieldMask.Paths, "autoclass")
}
+ if uattrs.SoftDeletePolicy != nil {
+ fieldMask.Paths = append(fieldMask.Paths, "soft_delete_policy")
+ }
for label := range uattrs.setLabels {
fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label))
@@ -373,6 +403,13 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
req.UpdateMask = fieldMask
+ if len(fieldMask.Paths) < 1 {
+ // Nothing to update. Send a get request for current attrs instead. This
+ // maintains consistency with JSON bucket updates.
+ opts = append(opts, idempotent(true))
+ return c.GetBucket(ctx, bucket, conds, opts...)
+ }
+
var battrs *BucketAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.UpdateBucket(ctx, req, s.gax...)
@@ -415,6 +452,8 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter,
MatchGlob: it.query.MatchGlob,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
+ SoftDeleted: it.query.SoftDeleted,
+ IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
@@ -429,10 +468,7 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
return err
}, s.retry, s.idempotent)
if err != nil {
- if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound {
- err = ErrBucketNotExist
- }
- return "", err
+ return "", formatBucketError(err)
}
for _, obj := range objects {
@@ -474,27 +510,30 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str
return c.raw.DeleteObject(ctx, req, s.gax...)
}, s.retry, s.idempotent)
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
- return ErrObjectNotExist
+ return formatObjectErr(err)
}
return err
}
-func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *grpcStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
req := &storagepb.GetObjectRequest{
- Bucket: bucketResourceName(globalProjectAlias, bucket),
- Object: object,
+ Bucket: bucketResourceName(globalProjectAlias, params.bucket),
+ Object: params.object,
// ProjectionFull by default.
ReadMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
}
- if err := applyCondsProto("grpcStorageClient.GetObject", gen, conds, req); err != nil {
+ if err := applyCondsProto("grpcStorageClient.GetObject", params.gen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
- if encryptionKey != nil {
- req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
+ if params.encryptionKey != nil {
+ req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
+ }
+ if params.softDeleted {
+ req.SoftDeleted = ¶ms.softDeleted
}
var attrs *ObjectAttrs
@@ -506,7 +545,7 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
}, s.retry, s.idempotent)
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
- return nil, ErrObjectNotExist
+ return nil, formatObjectErr(err)
}
return attrs, err
@@ -584,6 +623,17 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
req.UpdateMask = fieldMask
+ if len(fieldMask.Paths) < 1 {
+ // Nothing to update. To maintain consistency with JSON, we must still
+ // update the object because metageneration and other fields are
+ // updated even on an empty update.
+ // gRPC will fail if the fieldmask is empty, so instead we add an
+ // output-only field to the update mask. Output-only fields are (and must
+ // be - see AIP 161) ignored, but allow us to send an empty update because
+ // any mask that is valid for read (as this one is) must be valid for write.
+ fieldMask.Paths = append(fieldMask.Paths, "create_time")
+ }
+
var attrs *ObjectAttrs
err := run(ctx, func(ctx context.Context) error {
res, err := c.raw.UpdateObject(ctx, req, s.gax...)
@@ -591,9 +641,65 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObje
return err
}, s.retry, s.idempotent)
if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound {
- return nil, ErrObjectNotExist
+ return nil, formatObjectErr(err)
+ }
+
+ return attrs, err
+}
+
+func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := &storagepb.RestoreObjectRequest{
+ Bucket: bucketResourceName(globalProjectAlias, params.bucket),
+ Object: params.object,
+ CopySourceAcl: ¶ms.copySourceACL,
+ }
+ if err := applyCondsProto("grpcStorageClient.RestoreObject", params.gen, params.conds, req); err != nil {
+ return nil, err
+ }
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
+ }
+
+ var attrs *ObjectAttrs
+ err := run(ctx, func(ctx context.Context) error {
+ res, err := c.raw.RestoreObject(ctx, req, s.gax...)
+ attrs = newObjectFromProto(res)
+ return err
+ }, s.retry, s.idempotent)
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return nil, formatObjectErr(err)
+ }
+ return attrs, err
+}
+
+func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := &storagepb.MoveObjectRequest{
+ Bucket: bucketResourceName(globalProjectAlias, params.bucket),
+ SourceObject: params.srcObject,
+ DestinationObject: params.dstObject,
+ }
+ if err := applyCondsProto("MoveObjectDestination", defaultGen, params.dstConds, req); err != nil {
+ return nil, err
+ }
+ if err := applySourceCondsProto("MoveObjectSource", defaultGen, params.srcConds, req); err != nil {
+ return nil, err
+ }
+
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
}
+ var attrs *ObjectAttrs
+ err := run(ctx, func(ctx context.Context) error {
+ res, err := c.raw.MoveObject(ctx, req, s.gax...)
+ attrs = newObjectFromProto(res)
+ return err
+ }, s.retry, s.idempotent)
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return nil, formatObjectErr(err)
+ }
return attrs, err
}
@@ -726,7 +832,7 @@ func (c *grpcStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve ObjectAttrs.
- attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+ attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return err
}
@@ -759,7 +865,7 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
// Selecting a specific generation of this object is not currently supported by the client.
func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
- o, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+ o, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return nil, err
}
@@ -769,7 +875,7 @@ func (c *grpcStorageClient) ListObjectACLs(ctx context.Context, bucket, object s
func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
// There is no separate API for PATCH in gRPC.
// Make a GET call first to retrieve ObjectAttrs.
- attrs, err := c.GetObject(ctx, bucket, object, defaultGen, nil, nil, opts...)
+ attrs, err := c.GetObject(ctx, &getObjectParams{bucket, object, defaultGen, nil, nil, false}, opts...)
if err != nil {
return err
}
@@ -834,6 +940,9 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec
obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...)
return err
}, s.retry, s.idempotent); err != nil {
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return nil, formatObjectErr(err)
+ }
return nil, err
}
@@ -861,7 +970,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
return nil, err
}
- if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil {
+ if err := applySourceCondsProto("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
return nil, err
}
@@ -883,6 +992,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err }
if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil {
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return nil, formatObjectErr(err)
+ }
return nil, err
}
@@ -897,10 +1009,71 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
-func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
- defer func() { trace.EndSpan(ctx, err) }()
+// Custom codec to be used for unmarshaling BidiReadObjectResponse messages.
+// This is used to avoid a copy of object data in proto.Unmarshal.
+type bytesCodecV2 struct {
+}
+
+var _ encoding.CodecV2 = bytesCodecV2{}
+
+// Marshal is used to encode messages to send for bytesCodecV2. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
+ vv, ok := v.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ }
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
+}
+
+// Unmarshal is used for data received for BidiReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
+ switch v := v.(type) {
+ case *mem.BufferSlice:
+ *v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
+ return nil
+ case proto.Message:
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
+ default:
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
+ }
+}
+
+func (bytesCodecV2) Name() string {
+ return ""
+}
+
+func contextMetadataFromBidiReadObject(req *storagepb.BidiReadObjectRequest) []string {
+ if len(req.GetReadObjectSpec().GetRoutingToken()) > 0 {
+ return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s&routing_token=%s", req.GetReadObjectSpec().GetBucket(), req.GetReadObjectSpec().GetRoutingToken())}
+ }
+ return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s", req.GetReadObjectSpec().GetBucket())}
+}
+
+type rangeSpec struct {
+ readID int64
+ writer io.Writer
+ offset int64
+ limit int64
+ currentBytesWritten int64
+ totalBytesWritten int64
+ callback func(int64, int64, error)
+}
+func (c *grpcStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewMultiRangeDownloader")
+ defer func() { trace.EndSpan(ctx, err) }()
s := callSettings(c.settings, opts...)
if s.userProject != "" {
@@ -908,58 +1081,69 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
b := bucketResourceName(globalProjectAlias, params.bucket)
- req := &storagepb.ReadObjectRequest{
+ object := params.object
+ r := &storagepb.BidiReadObjectSpec{
Bucket: b,
- Object: params.object,
+ Object: object,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
}
+
// The default is a negative value, which means latest.
if params.gen >= 0 {
- req.Generation = params.gen
+ r.Generation = params.gen
}
- // Define a function that initiates a Read with offset and length, assuming
- // we have already read seen bytes.
- reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
- // If the context has already expired, return immediately without making
- // we call.
- if err := ctx.Err(); err != nil {
- return nil, nil, err
+ if params.handle != nil && len(*params.handle) != 0 {
+ r.ReadHandle = &storagepb.BidiReadHandle{
+ Handle: *params.handle,
}
+ }
+ req := &storagepb.BidiReadObjectRequest{
+ ReadObjectSpec: r,
+ }
- cc, cancel := context.WithCancel(ctx)
-
- req.ReadOffset = params.offset + seen
-
- // Only set a ReadLimit if length is greater than zero, because <= 0 means
- // to read it all.
- if params.length > 0 {
- req.ReadLimit = params.length - seen
- }
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
- if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil {
- cancel()
+ openStream := func(readHandle ReadHandle) (*bidiReadStreamResponse, context.CancelFunc, error) {
+ if err := applyCondsProto("grpcStorageClient.BidiReadObject", params.gen, params.conds, r); err != nil {
return nil, nil, err
}
-
- var stream storagepb.Storage_ReadObjectClient
- var msg *storagepb.ReadObjectResponse
- var err error
-
+ if len(readHandle) != 0 {
+ req.GetReadObjectSpec().ReadHandle = &storagepb.BidiReadHandle{
+ Handle: readHandle,
+ }
+ }
+ var stream storagepb.Storage_BidiReadObjectClient
+ var resp *storagepb.BidiReadObjectResponse
+ cc, cancel := context.WithCancel(ctx)
err = run(cc, func(ctx context.Context) error {
- stream, err = c.raw.ReadObject(cc, req, s.gax...)
+ stream, err = c.raw.BidiReadObject(ctx, s.gax...)
if err != nil {
+ // BidiReadObjectRedirectedError error is only returned on initial open in case of a redirect.
+ // The routing token that should be used when reopening the read stream. Needs to be exported.
+ rpcStatus := status.Convert(err)
+ details := rpcStatus.Details()
+ for _, detail := range details {
+ if bidiError, ok := detail.(*storagepb.BidiReadObjectRedirectedError); ok {
+ r.ReadHandle = bidiError.ReadHandle
+ r.RoutingToken = bidiError.RoutingToken
+ req.ReadObjectSpec = r
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
+ }
+ }
return err
}
-
- msg, err = stream.Recv()
- // These types of errors show up on the Recv call, rather than the
- // initialization of the stream via ReadObject above.
- if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
- return ErrObjectNotExist
+ // Incase stream opened succesfully, send first message on the stream.
+ // First message to stream should contain read_object_spec
+ err = stream.Send(req)
+ if err != nil {
+ return err
}
-
- return err
+ resp, err = stream.Recv()
+ if err != nil {
+ return err
+ }
+ return nil
}, s.retry, s.idempotent)
if err != nil {
// Close the stream context we just created to ensure we don't leak
@@ -967,23 +1151,226 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
cancel()
return nil, nil, err
}
-
- return &readStreamResponse{stream, msg}, cancel, nil
+ return &bidiReadStreamResponse{stream: stream, response: resp}, cancel, nil
}
- res, cancel, err := reopen(0)
+ // For the first time open stream without adding any range.
+ resp, cancel, err := openStream(nil)
if err != nil {
return nil, err
}
// The first message was Recv'd on stream open, use it to populate the
// object metadata.
- msg := res.response
+ msg := resp.response
obj := msg.GetMetadata()
// This is the size of the entire object, even if only a range was requested.
size := obj.GetSize()
- r = &Reader{
+ rr := &gRPCBidiReader{
+ stream: resp.stream,
+ cancel: cancel,
+ settings: s,
+ readHandle: msg.GetReadHandle().GetHandle(),
+ readID: 1,
+ reopen: openStream,
+ readSpec: r,
+ data: make(chan []rangeSpec, 100),
+ ctx: ctx,
+ closeReceiver: make(chan bool, 10),
+ closeManager: make(chan bool, 10),
+ managerRetry: make(chan bool), // create unbuffered channel for closing the streamManager goroutine.
+ receiverRetry: make(chan bool), // create unbuffered channel for closing the streamReceiver goroutine.
+ mp: make(map[int64]rangeSpec),
+ done: false,
+ activeTask: 0,
+ streamRecreation: false,
+ }
+
+ // streamManager goroutine runs in background where we send message to gcs and process response.
+ streamManager := func() {
+ var currentSpec []rangeSpec
+ for {
+ select {
+ case <-rr.ctx.Done():
+ rr.mu.Lock()
+ rr.done = true
+ rr.mu.Unlock()
+ return
+ case <-rr.managerRetry:
+ return
+ case <-rr.closeManager:
+ rr.mu.Lock()
+ if len(rr.mp) != 0 {
+ for key := range rr.mp {
+ rr.mp[key].callback(rr.mp[key].offset, rr.mp[key].totalBytesWritten, fmt.Errorf("stream closed early"))
+ delete(rr.mp, key)
+ }
+ }
+ rr.activeTask = 0
+ rr.mu.Unlock()
+ return
+ case currentSpec = <-rr.data:
+ var readRanges []*storagepb.ReadRange
+ var err error
+ rr.mu.Lock()
+ for _, v := range currentSpec {
+ rr.mp[v.readID] = v
+ readRanges = append(readRanges, &storagepb.ReadRange{ReadOffset: v.offset, ReadLength: v.limit, ReadId: v.readID})
+ }
+ rr.mu.Unlock()
+ // We can just send 100 request to gcs in one request.
+ // In case of Add we will send only one range request to gcs but in case of retry we can have more than 100 ranges.
+ // Hence be will divide the request in chunk of 100.
+ // For example with 457 ranges on stream we will have 5 request to gcs [0:99], [100:199], [200:299], [300:399], [400:456]
+ requestCount := len(readRanges) / 100
+ if len(readRanges)%100 != 0 {
+ requestCount++
+ }
+ for i := 0; i < requestCount; i++ {
+ start := i * 100
+ end := (i + 1) * 100
+ if end > len(readRanges) {
+ end = len(readRanges)
+ }
+ curReq := readRanges[start:end]
+ err = rr.stream.Send(&storagepb.BidiReadObjectRequest{
+ ReadRanges: curReq,
+ })
+ if err != nil {
+ // cancel stream and reopen the stream again.
+ // Incase again an error is thrown close the streamManager goroutine.
+ rr.retrier(err, "manager")
+ break
+ }
+ }
+
+ }
+ }
+ }
+
+ streamReceiver := func() {
+ var resp *storagepb.BidiReadObjectResponse
+ var err error
+ for {
+ select {
+ case <-rr.ctx.Done():
+ rr.done = true
+ return
+ case <-rr.receiverRetry:
+ return
+ case <-rr.closeReceiver:
+ return
+ default:
+ // This function reads the data sent for a particular range request and has a callback
+ // to indicate that output buffer is filled.
+ resp, err = rr.stream.Recv()
+ if resp.GetReadHandle().GetHandle() != nil {
+ rr.readHandle = resp.GetReadHandle().GetHandle()
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ if err != nil {
+ // cancel stream and reopen the stream again.
+ // Incase again an error is thrown close the streamManager goroutine.
+ rr.retrier(err, "receiver")
+ }
+
+ if err == nil {
+ rr.mu.Lock()
+ if len(rr.mp) == 0 && rr.activeTask == 0 {
+ rr.closeReceiver <- true
+ rr.closeManager <- true
+ return
+ }
+ rr.mu.Unlock()
+ arr := resp.GetObjectDataRanges()
+ for _, val := range arr {
+ id := val.GetReadRange().GetReadId()
+ rr.mu.Lock()
+ _, ok := rr.mp[id]
+ if !ok {
+ // it's ok to ignore responses for read_id not in map as user would have been notified by callback.
+ continue
+ }
+ _, err = rr.mp[id].writer.Write(val.GetChecksummedData().GetContent())
+ if err != nil {
+ rr.mp[id].callback(rr.mp[id].offset, rr.mp[id].totalBytesWritten, err)
+ rr.activeTask--
+ delete(rr.mp, id)
+ } else {
+ rr.mp[id] = rangeSpec{
+ readID: rr.mp[id].readID,
+ writer: rr.mp[id].writer,
+ offset: rr.mp[id].offset,
+ limit: rr.mp[id].limit,
+ currentBytesWritten: rr.mp[id].currentBytesWritten + int64(len(val.GetChecksummedData().GetContent())),
+ totalBytesWritten: rr.mp[id].totalBytesWritten + int64(len(val.GetChecksummedData().GetContent())),
+ callback: rr.mp[id].callback,
+ }
+ }
+ if val.GetRangeEnd() {
+ rr.mp[id].callback(rr.mp[id].offset, rr.mp[id].totalBytesWritten, nil)
+ rr.activeTask--
+ delete(rr.mp, id)
+ }
+ rr.mu.Unlock()
+ }
+ }
+
+ }
+ }
+ }
+
+ rr.retrier = func(err error, thread string) {
+ rr.mu.Lock()
+ if !rr.streamRecreation {
+ rr.streamRecreation = true
+ } else {
+ rr.mu.Unlock()
+ return
+ }
+ rr.mu.Unlock()
+ // close both the go routines to make the stream recreation syncronous.
+ if thread == "receiver" {
+ rr.managerRetry <- true
+ } else {
+ rr.receiverRetry <- true
+ }
+ err = rr.retryStream(err)
+ if err != nil {
+ rr.mu.Lock()
+ for key := range rr.mp {
+ rr.mp[key].callback(rr.mp[key].offset, rr.mp[key].totalBytesWritten, err)
+ delete(rr.mp, key)
+ }
+ // In case we hit an permanent error, delete entries from map and remove active tasks.
+ rr.activeTask = 0
+ rr.mu.Unlock()
+ rr.close()
+ } else {
+ // If stream recreation happened successfully lets again start
+ // both the goroutine making the whole flow asynchronous again.
+ if thread == "receiver" {
+ go streamManager()
+ } else {
+ go streamReceiver()
+ }
+ }
+ rr.mu.Lock()
+ rr.streamRecreation = false
+ rr.mu.Unlock()
+ }
+
+ rr.mu.Lock()
+ rr.objectSize = size
+ rr.mu.Unlock()
+
+ go streamManager()
+ go streamReceiver()
+
+ return &MultiRangeDownloader{
Attrs: ReaderObjectAttrs{
Size: size,
ContentType: obj.GetContentType(),
@@ -993,125 +1380,408 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
Metageneration: obj.GetMetageneration(),
Generation: obj.GetGeneration(),
},
- reader: &gRPCReader{
- stream: res.stream,
- reopen: reopen,
- cancel: cancel,
- size: size,
- // Store the content from the first Recv in the
- // client buffer for reading later.
- leftovers: msg.GetChecksummedData().GetContent(),
- settings: s,
- zeroRange: params.length == 0,
- },
- }
+ reader: rr,
+ }, nil
+}
- cr := msg.GetContentRange()
- if cr != nil {
- r.Attrs.StartOffset = cr.GetStart()
- r.remain = cr.GetEnd() - cr.GetStart()
- } else {
- r.remain = size
+func getActiveRange(r *gRPCBidiReader) []rangeSpec {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ var activeRange []rangeSpec
+ for k, v := range r.mp {
+ activeRange = append(activeRange, rangeSpec{
+ readID: k,
+ writer: v.writer,
+ offset: (v.offset + v.currentBytesWritten),
+ limit: v.limit - v.currentBytesWritten,
+ callback: v.callback,
+ currentBytesWritten: 0,
+ totalBytesWritten: v.totalBytesWritten,
+ })
+ r.mp[k] = activeRange[len(activeRange)-1]
}
+ return activeRange
+}
- // For a zero-length request, explicitly close the stream and set remaining
- // bytes to zero.
- if params.length == 0 {
- r.remain = 0
- r.reader.Close()
+// retryStream cancel's stream and reopen the stream again.
+func (r *gRPCBidiReader) retryStream(err error) error {
+ var shouldRetry = ShouldRetry
+ if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
+ shouldRetry = r.settings.retry.shouldRetry
+ }
+ if shouldRetry(err) {
+ // This will "close" the existing stream and immediately attempt to
+ // reopen the stream, but will backoff if further attempts are necessary.
+ // When Reopening the stream only failed readID will be added to stream.
+ return r.reopenStream(getActiveRange(r))
}
+ return err
+}
- // Only support checksums when reading an entire object, not a range.
- if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
- r.wantCRC = checksums.GetCrc32C()
- r.checkCRC = true
+// reopenStream "closes" the existing stream and attempts to reopen a stream and
+// sets the Reader's stream and cancelStream properties in the process.
+func (r *gRPCBidiReader) reopenStream(failSpec []rangeSpec) error {
+ // Close existing stream and initialize new stream with updated offset.
+ if r.cancel != nil {
+ r.cancel()
}
- return r, nil
+ res, cancel, err := r.reopen(r.readHandle)
+ if err != nil {
+ return err
+ }
+ r.stream = res.stream
+ r.cancel = cancel
+ r.readHandle = res.response.GetReadHandle().GetHandle()
+ if failSpec != nil {
+ r.data <- failSpec
+ }
+ return nil
}
-func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
- s := callSettings(c.settings, opts...)
+// Add will add current range to stream.
+func (mr *gRPCBidiReader) add(output io.Writer, offset, limit int64, callback func(int64, int64, error)) {
+ mr.mu.Lock()
+ objectSize := mr.objectSize
+ mr.mu.Unlock()
+
+ if offset > objectSize {
+ callback(offset, 0, fmt.Errorf("offset larger than size of object: %v", objectSize))
+ return
+ }
+ if limit < 0 {
+ callback(offset, 0, fmt.Errorf("limit can't be negative"))
+ return
+ }
+ mr.mu.Lock()
+ currentID := (*mr).readID
+ (*mr).readID++
+ if !mr.done {
+ spec := rangeSpec{readID: currentID, writer: output, offset: offset, limit: limit, currentBytesWritten: 0, totalBytesWritten: 0, callback: callback}
+ mr.activeTask++
+ mr.data <- []rangeSpec{spec}
+ } else {
+ callback(offset, 0, fmt.Errorf("stream is closed, can't add range"))
+ }
+ mr.mu.Unlock()
+}
- var offset int64
- errorf := params.setError
- progress := params.progress
- setObj := params.setObj
+func (mr *gRPCBidiReader) wait() {
+ mr.mu.Lock()
+ // we should wait until there is active task or an entry in the map.
+ // there can be a scenario we have nothing in map for a moment or too but still have active task.
+ // hence in case we have permanent errors we reduce active task to 0 so that this does not block wait.
+ keepWaiting := len(mr.mp) != 0 || mr.activeTask != 0
+ mr.mu.Unlock()
- pr, pw := io.Pipe()
- gw := newGRPCWriter(c, params, pr)
- gw.settings = s
- if s.userProject != "" {
- gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject)
+ for keepWaiting {
+ mr.mu.Lock()
+ keepWaiting = len(mr.mp) != 0 || mr.activeTask != 0
+ mr.mu.Unlock()
}
+}
- // This function reads the data sent to the pipe and sends sets of messages
- // on the gRPC client-stream as the buffer is filled.
- go func() {
- defer close(params.donec)
+// Close will notify stream manager goroutine that the reader has been closed, if it's still running.
+func (mr *gRPCBidiReader) close() error {
+ if mr.cancel != nil {
+ mr.cancel()
+ }
+ mr.mu.Lock()
+ mr.done = true
+ mr.activeTask = 0
+ mr.mu.Unlock()
+ mr.closeReceiver <- true
+ mr.closeManager <- true
+ return nil
+}
- // Loop until there is an error or the Object has been finalized.
- for {
- // Note: This blocks until either the buffer is full or EOF is read.
- recvd, doneReading, err := gw.read()
- if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
- }
+func (mrr *gRPCBidiReader) getHandle() []byte {
+ return mrr.readHandle
+}
- if params.attrs.Retention != nil {
- // TO-DO: remove once ObjectRetention is available - see b/308194853
- err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
- errorf(err)
- pr.CloseWithError(err)
- return
- }
- // The chunk buffer is full, but there is no end in sight. This
- // means that either:
- // 1. A resumable upload will need to be used to send
- // multiple chunks, until we are done reading data. Start a
- // resumable upload if it has not already been started.
- // 2. ChunkSize of zero may also have a full buffer, but a resumable
- // session should not be initiated in this case.
- if !doneReading && gw.upid == "" && params.chunkSize != 0 {
- err = gw.startResumableUpload()
- if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
- }
- }
+func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
+ // If bidi reads was not selected, use the legacy read object API.
+ if !c.config.grpcBidiReads {
+ return c.NewRangeReaderReadObject(ctx, params, opts...)
+ }
- o, off, err := gw.uploadBuffer(recvd, offset, doneReading)
- if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
- }
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
+ defer func() { trace.EndSpan(ctx, err) }()
- // At this point, the current buffer has been uploaded. For resumable
- // uploads and chunkSize = 0, capture the committed offset here in case
- // the upload was not finalized and another chunk is to be uploaded. Call
- // the progress function for resumable uploads only.
- if gw.upid != "" || gw.chunkSize == 0 {
- offset = off
- }
- if gw.upid != "" {
- progress(offset)
- }
+ s := callSettings(c.settings, opts...)
- // When we are done reading data without errors, set the object and
- // finish.
- if doneReading {
- // Build Object from server's response.
- setObj(newObjectFromProto(o))
- return
- }
+ s.gax = append(s.gax, gax.WithGRPCOptions(
+ grpc.ForceCodecV2(bytesCodecV2{}),
+ ))
+
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
+ }
+
+ b := bucketResourceName(globalProjectAlias, params.bucket)
+
+ // Create a BidiReadObjectRequest.
+ spec := &storagepb.BidiReadObjectSpec{
+ Bucket: b,
+ Object: params.object,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
+ }
+ if err := applyCondsProto("gRPCReader.NewRangeReader", params.gen, params.conds, spec); err != nil {
+ return nil, err
+ }
+ if params.handle != nil && len(*params.handle) != 0 {
+ spec.ReadHandle = &storagepb.BidiReadHandle{
+ Handle: *params.handle,
+ }
+ }
+ req := &storagepb.BidiReadObjectRequest{
+ ReadObjectSpec: spec,
+ }
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
+
+ // Define a function that initiates a Read with offset and length, assuming
+ // we have already read seen bytes.
+ reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
+ // If the context has already expired, return immediately without making
+ // we call.
+ if err := ctx.Err(); err != nil {
+ return nil, nil, err
+ }
+
+ cc, cancel := context.WithCancel(ctx)
+
+ // BidiReadObject can take multiple ranges, but we just request one in this case.
+ readRange := &storagepb.ReadRange{
+ ReadOffset: params.offset + seen,
+ ReadId: 1,
+ }
+
+ // Only set a ReadLength if length is greater than zero, because <= 0 means
+ // to read it all.
+ if params.length > 0 {
+ readRange.ReadLength = params.length - seen
+ }
+
+ req.ReadRanges = []*storagepb.ReadRange{readRange}
+
+ var stream storagepb.Storage_BidiReadObjectClient
+ var err error
+ var decoder *readResponseDecoder
+
+ err = run(cc, func(ctx context.Context) error {
+ stream, err = c.raw.BidiReadObject(ctx, s.gax...)
+ if err != nil {
+ return err
+ }
+ if err := stream.Send(req); err != nil {
+ return err
+ }
+ // Oneshot reads can close the client->server side immediately.
+ if err := stream.CloseSend(); err != nil {
+ return err
+ }
+
+ // Receive the message into databuf as a wire-encoded message so we can
+ // use a custom decoder to avoid an extra copy at the protobuf layer.
+ databufs := mem.BufferSlice{}
+ err := stream.RecvMsg(&databufs)
+ // These types of errors show up on the RecvMsg call, rather than the
+ // initialization of the stream via BidiReadObject above.
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return formatObjectErr(err)
+ }
+ if err != nil {
+ return err
+ }
+ // Use a custom decoder that uses protobuf unmarshalling for all
+ // fields except the object data. Object data is handled separately
+ // to avoid a copy.
+ decoder = &readResponseDecoder{
+ databufs: databufs,
+ }
+ err = decoder.readFullObjectResponse()
+ return err
+ }, s.retry, s.idempotent)
+ if err != nil {
+ // Close the stream context we just created to ensure we don't leak
+ // resources.
+ cancel()
+ // Free any buffers.
+ if decoder != nil && decoder.databufs != nil {
+ decoder.databufs.Free()
+ }
+ return nil, nil, err
+ }
+
+ return &readStreamResponse{
+ stream: stream,
+ decoder: decoder,
+ }, cancel, nil
+ }
+
+ res, cancel, err := reopen(0)
+ if err != nil {
+ return nil, err
+ }
+ // The first message was Recv'd on stream open, use it to populate the
+ // object metadata and read handle.
+ msg := res.decoder.msg
+ obj := msg.GetMetadata()
+ handle := ReadHandle(msg.GetReadHandle().GetHandle())
+ // This is the size of the entire object, even if only a range was requested.
+ size := obj.GetSize()
+
+ // Only support checksums when reading an entire object, not a range.
+ var (
+ wantCRC uint32
+ checkCRC bool
+ )
+ if checksums := obj.GetChecksums(); checksums != nil && checksums.Crc32C != nil {
+ if params.offset == 0 && params.length < 0 {
+ checkCRC = true
}
+ wantCRC = checksums.GetCrc32C()
+ }
+
+ startOffset := params.offset
+ if params.offset < 0 {
+ startOffset = size + params.offset
+ }
+
+ // The remaining bytes are the lesser of the requested range and all bytes
+ // after params.offset.
+ length := params.length
+ if params.length > size || params.length < 0 {
+ // if params.length < 0 (or larger than object size),
+ // all remaining bytes were requested.
+ length = size
+ }
+ remain := length - startOffset
+
+ metadata := obj.GetMetadata()
+ r = &Reader{
+ Attrs: ReaderObjectAttrs{
+ Size: size,
+ StartOffset: startOffset,
+ ContentType: obj.GetContentType(),
+ ContentEncoding: obj.GetContentEncoding(),
+ CacheControl: obj.GetCacheControl(),
+ LastModified: obj.GetUpdateTime().AsTime(),
+ Metageneration: obj.GetMetageneration(),
+ Generation: obj.GetGeneration(),
+ CRC32C: wantCRC,
+ },
+ objectMetadata: &metadata,
+ reader: &gRPCReader{
+ stream: res.stream,
+ reopen: reopen,
+ cancel: cancel,
+ size: size,
+ // Preserve the decoder to read out object data when Read/WriteTo is called.
+ currMsg: res.decoder,
+ settings: s,
+ zeroRange: params.length == 0,
+ wantCRC: wantCRC,
+ checkCRC: checkCRC,
+ },
+ checkCRC: checkCRC,
+ handle: &handle,
+ remain: remain,
+ }
+
+ // For a zero-length request, explicitly close the stream and set remaining
+ // bytes to zero.
+ if params.length == 0 {
+ r.remain = 0
+ r.reader.Close()
+ }
+
+ return r, nil
+}
+
+func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
+ var offset int64
+ errorf := params.setError
+ setObj := params.setObj
+ setFlush := params.setFlush
+ pr, pw := io.Pipe()
+
+ s := callSettings(c.settings, opts...)
+
+ retryDeadline := defaultWriteChunkRetryDeadline
+ if params.chunkRetryDeadline != 0 {
+ retryDeadline = params.chunkRetryDeadline
+ }
+ if s.retry == nil {
+ s.retry = defaultRetry.clone()
+ }
+ s.retry.maxRetryDuration = retryDeadline
+
+ // This function reads the data sent to the pipe and sends sets of messages
+ // on the gRPC client-stream as the buffer is filled.
+ go func() {
+ err := func() error {
+ // Unless the user told us the content type, we have to determine it from
+ // the first read.
+ var r io.Reader = pr
+ if params.attrs.ContentType == "" && !params.forceEmptyContentType {
+ r, params.attrs.ContentType = gax.DetermineContentType(r)
+ }
+
+ var gw *gRPCWriter
+ gw, err := newGRPCWriter(c, s, params, r, pw, params.setPipeWriter)
+ if err != nil {
+ return err
+ }
+
+ // Set Flush func for use by exported Writer.Flush.
+ setFlush(func() (int64, error) {
+ return gw.flush()
+ })
+
+ // Loop until there is an error or the Object has been finalized.
+ for {
+ // Note: This blocks until either the buffer is full or EOF is read.
+ recvd, doneReading, err := gw.read()
+ if err != nil {
+ return err
+ }
+
+ var o *storagepb.Object
+ uploadBuff := func(ctx context.Context) error {
+ obj, err := gw.uploadBuffer(ctx, recvd, offset, doneReading)
+ o = obj
+ return err
+ }
+
+ // Add routing headers to the context metadata for single-shot and resumable
+ // writes. Append writes need to set this at a lower level to pass the routing
+ // token.
+ bctx := gw.ctx
+ if !gw.append {
+ bctx = bucketContext(bctx, gw.bucket)
+ }
+ err = run(bctx, uploadBuff, gw.settings.retry, s.idempotent)
+ if err != nil {
+ return err
+ }
+ offset += int64(recvd)
+
+ // When we are done reading data without errors, set the object and
+ // finish.
+ if doneReading {
+ // Build Object from server's response.
+ setObj(newObjectFromProto(o))
+ return nil
+ }
+ }
+ }()
+
+ // These calls are still valid if err is nil
+ err = checkCanceled(err)
+ errorf(err)
+ pr.CloseWithError(err)
+ close(params.donec)
}()
return pw, nil
@@ -1172,213 +1842,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
return res.Permissions, nil
}
-// HMAC Key methods.
+// HMAC Key methods are not implemented in gRPC client.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
- s := callSettings(c.settings, opts...)
- req := &storagepb.ListHmacKeysRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- ShowDeletedKeys: showDeletedKeys,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
it := &HMACKeysIterator{
ctx: ctx,
- projectID: project,
- retry: s.retry,
+ projectID: "",
+ retry: nil,
}
- fetch := func(pageSize int, pageToken string) (token string, err error) {
- var hmacKeys []*storagepb.HmacKeyMetadata
- err = run(it.ctx, func(ctx context.Context) error {
- gitr := c.raw.ListHmacKeys(ctx, req, s.gax...)
- hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- for _, hkmd := range hmacKeys {
- hk := toHMACKeyFromProto(hkmd)
- it.hmacKeys = append(it.hmacKeys, hk)
- }
-
- return token, nil
+ fetch := func(_ int, _ string) (token string, err error) {
+ return "", errMethodNotSupported
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
- func() int { return len(it.hmacKeys) - it.index },
- func() interface{} {
- prev := it.hmacKeys
- it.hmacKeys = it.hmacKeys[:0]
- it.index = 0
- return prev
- })
+ func() int { return 0 },
+ func() interface{} { return nil },
+ )
return it
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- hk := &storagepb.HmacKeyMetadata{
- AccessId: accessID,
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- State: string(attrs.State),
- Etag: attrs.Etag,
- }
- var paths []string
- fieldMask := &fieldmaskpb.FieldMask{
- Paths: paths,
- }
- if attrs.State != "" {
- fieldMask.Paths = append(fieldMask.Paths, "state")
- }
- req := &storagepb.UpdateHmacKeyRequest{
- HmacKey: hk,
- UpdateMask: fieldMask,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateHmacKeyRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var res *storagepb.CreateHmacKeyResponse
- err := run(ctx, func(ctx context.Context) error {
- var err error
- res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- key := toHMACKeyFromProto(res.Metadata)
- key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
-
- return key, nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteHmacKey(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
-// Notification methods.
+// Notification methods are not implemented in gRPC client.
func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- req := &storagepb.ListNotificationConfigsRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- }
- var notifications []*storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...)
- for {
- // PageSize is not set and fallbacks to the API default pageSize of 100.
- items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken())
- if err != nil {
- return err
- }
- notifications = append(notifications, items...)
- // If there are no more results, nextPageToken is empty and err is nil.
- if nextPageToken == "" {
- return err
- }
- req.PageToken = nextPageToken
- }
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
-
- return notificationsToMapFromProto(notifications), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateNotificationConfigRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- NotificationConfig: toProtoNotification(n),
- }
- var pbn *storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- var err error
- pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toNotificationFromProto(pbn), err
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteNotificationConfigRequest{Name: id}
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteNotificationConfig(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
// setUserProjectMetadata appends a project ID to the outgoing Context metadata
@@ -1391,24 +1901,76 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context
}
type readStreamResponse struct {
- stream storagepb.Storage_ReadObjectClient
- response *storagepb.ReadObjectResponse
+ stream storagepb.Storage_BidiReadObjectClient
+ decoder *readResponseDecoder
+}
+
+type bidiReadStreamResponse struct {
+ stream storagepb.Storage_BidiReadObjectClient
+ response *storagepb.BidiReadObjectResponse
+}
+
+type gRPCBidiReader struct {
+ stream storagepb.Storage_BidiReadObjectClient
+ cancel context.CancelFunc
+ settings *settings
+ readHandle ReadHandle
+ readID int64
+ reopen func(ReadHandle) (*bidiReadStreamResponse, context.CancelFunc, error)
+ readSpec *storagepb.BidiReadObjectSpec
+ data chan []rangeSpec
+ ctx context.Context
+ closeReceiver chan bool
+ closeManager chan bool
+ managerRetry chan bool
+ receiverRetry chan bool
+ mu sync.Mutex // protects all vars in gRPCBidiReader from concurrent access
+ mp map[int64]rangeSpec // always use the mutex when accessing the map
+ done bool // always use the mutex when accessing this variable
+ activeTask int64 // always use the mutex when accessing this variable
+ objectSize int64 // always use the mutex when accessing this variable
+ retrier func(error, string)
+ streamRecreation bool // This helps us identify if stream recreation is in progress or not. If stream recreation gets called from two goroutine then this will stop second one.
}
+// gRPCReader is used by storage.Reader if the experimental option WithGRPCBidiReads is passed.
type gRPCReader struct {
seen, size int64
zeroRange bool
- stream storagepb.Storage_ReadObjectClient
+ stream storagepb.Storage_BidiReadObjectClient
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
+ currMsg *readResponseDecoder // decoder for the current message
cancel context.CancelFunc
settings *settings
+ checkCRC bool // should we check the CRC?
+ wantCRC uint32 // the CRC32c value the server sent in the header
+ gotCRC uint32 // running crc
+}
+
+// Update the running CRC with the data in the slice, if CRC checking was enabled.
+func (r *gRPCReader) updateCRC(b []byte) {
+ if r.checkCRC {
+ r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
+ }
+}
+
+// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
+func (r *gRPCReader) runCRCCheck() error {
+ if r.checkCRC && r.gotCRC != r.wantCRC {
+ return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
+ }
+ return nil
}
// Read reads bytes into the user's buffer from an open gRPC stream.
func (r *gRPCReader) Read(p []byte) (int, error) {
- // The entire object has been read by this reader, return EOF.
+ // The entire object has been read by this reader, check the checksum if
+ // necessary and return EOF.
if r.size == r.seen || r.zeroRange {
+ if err := r.runCRCCheck(); err != nil {
+ return 0, err
+ }
return 0, io.EOF
}
@@ -1417,21 +1979,25 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// using the same reader. One encounters an error and the stream is closed
// and then reopened while the other routine attempts to read from it.
if r.stream == nil {
- return 0, fmt.Errorf("reader has been closed")
+ return 0, fmt.Errorf("storage: reader has been closed")
}
var n int
- // Read leftovers and return what was available to conform to the Reader
+
+ // If there is data remaining in the current message, return what was
+ // available to conform to the Reader
// interface: https://pkg.go.dev/io#Reader.
- if len(r.leftovers) > 0 {
- n = copy(p, r.leftovers)
+ if !r.currMsg.done {
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(n)
- r.leftovers = r.leftovers[n:]
return n, nil
}
// Attempt to Recv the next message on the stream.
- msg, err := r.recv()
+ // This will update r.currMsg with the decoder for the new message.
+ err := r.recv()
if err != nil {
return 0, err
}
@@ -1443,32 +2009,96 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- content := msg.GetChecksummedData().GetContent()
- n = copy(p[n:], content)
- leftover := len(content) - n
- if leftover > 0 {
- // Wasn't able to copy all of the data in the message, store for
- // future Read calls.
- r.leftovers = content[n:]
- }
- r.seen += int64(n)
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
return n, nil
}
+// WriteTo writes all the data requested by the Reader into w, implementing
+// io.WriterTo.
+func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
+ // The entire object has been read by this reader, check the checksum if
+ // necessary and return nil.
+ if r.size == r.seen || r.zeroRange {
+ if err := r.runCRCCheck(); err != nil {
+ return 0, err
+ }
+ return 0, nil
+ }
+
+ // No stream to read from, either never initialized or Close was called.
+ // Note: There is a potential concurrency issue if multiple routines are
+ // using the same reader. One encounters an error and the stream is closed
+ // and then reopened while the other routine attempts to read from it.
+ if r.stream == nil {
+ return 0, fmt.Errorf("storage: reader has been closed")
+ }
+
+ // Track bytes written during before call.
+ var alreadySeen = r.seen
+
+ // Write any already received message to the stream. There will be some leftovers from the
+ // original NewRangeReader call.
+ if r.currMsg != nil && !r.currMsg.done {
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(written)
+ r.currMsg = nil
+ if err != nil {
+ return r.seen - alreadySeen, err
+ }
+ }
+
+ // Loop and receive additional messages until the entire data is written.
+ for {
+ // Attempt to receive the next message on the stream.
+ // Will terminate with io.EOF once data has all come through.
+ // recv() handles stream reopening and retry logic so no need for retries here.
+ err := r.recv()
+ if err != nil {
+ if err == io.EOF {
+ // We are done; check the checksum if necessary and return.
+ err = r.runCRCCheck()
+ }
+ return r.seen - alreadySeen, err
+ }
+
+ // TODO: Determine if we need to capture incremental CRC32C for this
+ // chunk. The Object CRC32C checksum is captured when directed to read
+ // the entire Object. If directed to read a range, we may need to
+ // calculate the range's checksum for verification if the checksum is
+ // present in the response here.
+ // TODO: Figure out if we need to support decompressive transcoding
+ // https://cloud.google.com/storage/docs/transcoding.
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(written)
+ if err != nil {
+ return r.seen - alreadySeen, err
+ }
+ }
+
+}
+
// Close cancels the read stream's context in order for it to be closed and
-// collected.
+// collected, and frees any currently in use buffers.
func (r *gRPCReader) Close() error {
if r.cancel != nil {
r.cancel()
}
- r.stream = nil
+ r.currMsg = nil
return nil
}
-// recv attempts to Recv the next message on the stream. In the event
-// that a retryable error is encountered, the stream will be closed, reopened,
-// and Recv again. This will attempt to Recv until one of the following is true:
+// recv attempts to Recv the next message on the stream and extract the object
+// data that it contains. In the event that a retryable error is encountered,
+// the stream will be closed, reopened, and RecvMsg again.
+// This will attempt to Recv until one of the following is true:
//
// * Recv is successful
// * A non-retryable error is encountered
@@ -1476,8 +2106,10 @@ func (r *gRPCReader) Close() error {
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
-func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
- msg, err := r.stream.Recv()
+
+func (r *gRPCReader) recv() error {
+ databufs := mem.BufferSlice{}
+ err := r.stream.RecvMsg(&databufs)
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
shouldRetry = r.settings.retry.shouldRetry
@@ -1486,377 +2118,858 @@ func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
- // successful, the next logical chunk will be returned.
- msg, err = r.reopenStream()
+ // successful, r.currMsg will be updated to include the new data.
+ return r.reopenStream()
+ }
+
+ if err != nil {
+ return err
+ }
+
+ r.currMsg = &readResponseDecoder{databufs: databufs}
+ return r.currMsg.readFullObjectResponse()
+}
+
+// ReadObjectResponse field and subfield numbers.
+const (
+ // Top level fields.
+ metadataField = protowire.Number(4)
+ objectRangeDataField = protowire.Number(6)
+ readHandleField = protowire.Number(7)
+ // Nested in ObjectRangeData
+ checksummedDataField = protowire.Number(1)
+ readRangeField = protowire.Number(2)
+ rangeEndField = protowire.Number(3)
+ // Nested in ObjectRangeData.ChecksummedData
+ checksummedDataContentField = protowire.Number(1)
+ checksummedDataCRC32CField = protowire.Number(2)
+)
+
+// readResponseDecoder is a wrapper on the raw message, used to decode one message
+// without copying object data. It also has methods to write out the resulting object
+// data to the user application.
+type readResponseDecoder struct {
+ databufs mem.BufferSlice // raw bytes of the message being processed
+ // Decoding offsets
+ off uint64 // offset in the messsage relative to the data as a whole
+ currBuf int // index of the current buffer being processed
+ currOff uint64 // offset in the current buffer
+ // Processed data
+ msg *storagepb.BidiReadObjectResponse // processed response message with all fields other than object data populated
+ dataOffsets bufferSliceOffsets // offsets of the object data in the message.
+ done bool // true if the data has been completely read.
+}
+
+type bufferSliceOffsets struct {
+ startBuf, endBuf int // indices of start and end buffers of object data in the msg
+ startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
+ currBuf int // index of current buffer being read out to the user application.
+ currOff uint64 // offset of read in current buffer.
+}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if the distance is split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
+// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the
+// buffers overall. Does not advance offsets.
+func (d *readResponseDecoder) copyNextBytes(n int) []byte {
+ remaining := n
+ if r := d.databufs.Len() - int(d.off); r < remaining {
+ remaining = r
+ }
+ currBuf := d.currBuf
+ currOff := d.currOff
+ var buf []byte
+ for remaining > 0 {
+ b := d.databufs[currBuf].ReadOnlyData()
+ remainingInCurr := len(b[currOff:])
+ if remainingInCurr < remaining {
+ buf = append(buf, b[currOff:]...)
+ remaining -= remainingInCurr
+ currBuf++
+ currOff = 0
+ } else {
+ buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
+ remaining = 0
+ }
+ }
+ return buf
+}
+
+// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
+// go past the end of the data.
+func (d *readResponseDecoder) advanceOffset(n uint64) error {
+ remaining := n
+ for remaining > 0 {
+ remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
+ if remainingInCurr <= remaining {
+ remaining -= remainingInCurr
+ d.currBuf++
+ d.currOff = 0
+ } else {
+ d.currOff += remaining
+ remaining = 0
+ }
+ }
+ // If we have advanced past the end of the buffers, something went wrong.
+ if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
+ return errors.New("decoding: truncated message, cannot advance offset")
+ }
+ d.off += n
+ return nil
+
+}
+
+// This copies object data from the message into the buffer and returns the number of
+// bytes copied. The data offsets are incremented in the message. The updateCRC
+// function is called on the copied bytes.
+func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0
+ }
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ n := copy(p, b)
+ updateCRC(b[:n])
+ d.dataOffsets.currOff += uint64(n)
+
+ // We've read all the data from this message. Free the underlying buffers.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
+ d.done = true
+ d.databufs.Free()
+ }
+ // We are at the end of the current buffer
+ if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
+ d.dataOffsets.currOff = 0
+ d.dataOffsets.currBuf++
+ }
+ return n
+}
+
+func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0, nil
+ }
+ var written int64
+ for !d.done {
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ var n int
+ // Write all remaining data from the current buffer
+ n, err := w.Write(b)
+ written += int64(n)
+ updateCRC(b)
+ if err != nil {
+ return written, err
+ }
+ d.dataOffsets.currOff = 0
+ // We've read all the data from this message.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ d.done = true
+ d.databufs.Free()
+ } else {
+ d.dataOffsets.currBuf++
+ }
+ }
+ return written, nil
+}
+
+// Consume the next available tag in the input data and return the field number and type.
+// Advances the relevant offsets in the data.
+func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
+ if tagLength < 0 {
+ return 0, 0, protowire.ParseError(tagLength)
+ }
+ // Update the offsets and current buffer depending on the tag length.
+ if err := d.advanceOffset(uint64(tagLength)); err != nil {
+ return 0, 0, fmt.Errorf("consuming tag: %w", err)
+ }
+ return fieldNum, fieldType, nil
+}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume any field values up to the end offset provided and don't return anything.
+// This is used to skip any values which are not going to be used.
+// msgEndOff is indexed in terms of the overall data across all buffers.
+func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+ // reimplement protowire.ConsumeFieldValue without the extra case for groups (which
+ // are are complicted and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
+ if err != nil {
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
+ }
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err)
+ }
+ offsets := bufferSliceOffsets{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
+ }
+
+ // Advance offsets to lengths of bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
+
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
+}
+
+// readFullObjectResponse returns the BidiReadObjectResponse that is encoded in the
+// wire-encoded message buffer b, or an error if the message is invalid.
+// This must be used on the first recv of an object as it may contain all fields
+// of BidiReadObjectResponse, and we use or pass on those fields to the user.
+// This function is essentially identical to proto.Unmarshal, except it aliases
+// the data in the input []byte. If the proto library adds a feature to
+// Unmarshal that does that, this function can be dropped.
+func (d *readResponseDecoder) readFullObjectResponse() error {
+ msg := &storagepb.BidiReadObjectResponse{}
+
+ // Loop over the entire message, extracting fields as we go. This does not
+ // handle field concatenation, in which the contents of a single field
+ // are split across multiple protobuf tags.
+ for d.off < uint64(d.databufs.Len()) {
+ fieldNum, fieldType, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming next tag: %w", err)
+ }
+
+ // Unmarshal the field according to its type. Only fields that are not
+ // nil will be present.
+ switch {
+ // This is a repeated field, so it can occur more than once. But, for now
+ // we can just take the first range per message since Reader only requests
+ // a single range.
+ // See https://protobuf.dev/programming-guides/encoding/#optional
+ // TODO: support multiple ranges once integrated with MultiRangeDownloader.
+ case fieldNum == objectRangeDataField && fieldType == protowire.BytesType:
+ // The object data field was found. Initialize the data ranges assuming
+ // exactly one range in the message.
+ msg.ObjectDataRanges = []*storagepb.ObjectRangeData{{ChecksummedData: &storagepb.ChecksummedData{}, ReadRange: &storagepb.ReadRange{}}}
+ bytesFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
+ }
+ var contentEndOff = d.off + bytesFieldLen
+ for d.off < contentEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming objectRangeData tag: %w", err)
+ }
+
+ switch {
+ case gotNum == checksummedDataField && gotTyp == protowire.BytesType:
+ checksummedDataFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
+ }
+ var checksummedDataEndOff = d.off + checksummedDataFieldLen
+ for d.off < checksummedDataEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming checksummedData tag: %w", err)
+ }
+ switch {
+ case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
+ // Get the offsets of the content bytes.
+ d.dataOffsets, err = d.consumeBytes()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ChecksummedData.Content: %w", err)
+ }
+ case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
+ v, err := d.consumeFixed32()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ChecksummedData.Crc32C: %w", err)
+ }
+ msg.ObjectDataRanges[0].ChecksummedData.Crc32C = &v
+ default:
+ err := d.consumeFieldValue(gotNum, gotTyp)
+ if err != nil {
+ return fmt.Errorf("invalid field in BidiReadObjectResponse.ChecksummedData: %w", err)
+ }
+ }
+ }
+ case gotNum == readRangeField && gotTyp == protowire.BytesType:
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ObjectDataRange.ReadRange: %v", err)
+ }
+
+ if err := proto.Unmarshal(buf, msg.ObjectDataRanges[0].ReadRange); err != nil {
+ return err
+ }
+ case gotNum == rangeEndField && gotTyp == protowire.VarintType: // proto encodes bool as int32
+ b, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("invalid ObjectDataRange.RangeEnd: %w", err)
+ }
+ msg.ObjectDataRanges[0].RangeEnd = protowire.DecodeBool(b)
+ }
+
+ }
+ case fieldNum == metadataField && fieldType == protowire.BytesType:
+ msg.Metadata = &storagepb.Object{}
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.Metadata: %v", err)
+ }
+
+ if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
+ return err
+ }
+ case fieldNum == readHandleField && fieldType == protowire.BytesType:
+ msg.ReadHandle = &storagepb.BidiReadHandle{}
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ReadHandle: %v", err)
+ }
+
+ if err := proto.Unmarshal(buf, msg.ReadHandle); err != nil {
+ return err
+ }
+ default:
+ err := d.consumeFieldValue(fieldNum, fieldType)
+ if err != nil {
+ return fmt.Errorf("invalid field in BidiReadObjectResponse: %w", err)
+ }
+ }
}
+ d.msg = msg
- return msg, err
+ return nil
}
// reopenStream "closes" the existing stream and attempts to reopen a stream and
// sets the Reader's stream and cancelStream properties in the process.
-func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) {
+func (r *gRPCReader) reopenStream() error {
// Close existing stream and initialize new stream with updated offset.
r.Close()
res, cancel, err := r.reopen(r.seen)
if err != nil {
- return nil, err
+ return err
}
r.stream = res.stream
+ r.currMsg = res.decoder
r.cancel = cancel
- return res.response, nil
+ return nil
}
-func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter {
- size := params.chunkSize
+func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader, pw *io.PipeWriter, setPipeWriter func(*io.PipeWriter)) (*gRPCWriter, error) {
+ if params.attrs.Retention != nil {
+ // TO-DO: remove once ObjectRetention is available - see b/308194853
+ return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+ }
+
+ size := googleapi.MinUploadChunkSize
+ // A completely bufferless upload (params.chunkSize <= 0) is not possible in
+ // gRPC because the buffer must be provided to the message. Use the minimum
+ // size possible.
+ if params.chunkSize > 0 {
+ size = params.chunkSize
+ }
// Round up chunksize to nearest 256KiB
if size%googleapi.MinUploadChunkSize != 0 {
size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
}
- // A completely bufferless upload is not possible as it is in JSON because
- // the buffer must be provided to the message. However use the minimum size
- // possible in this case.
- if params.chunkSize == 0 {
- size = googleapi.MinUploadChunkSize
+ if s.userProject != "" {
+ params.ctx = setUserProjectMetadata(params.ctx, s.userProject)
}
- return &gRPCWriter{
- buf: make([]byte, size),
- c: c,
- ctx: params.ctx,
- reader: r,
- bucket: params.bucket,
- attrs: params.attrs,
- conds: params.conds,
- encryptionKey: params.encryptionKey,
- sendCRC32C: params.sendCRC32C,
- chunkSize: params.chunkSize,
+ spec := &storagepb.WriteObjectSpec{
+ Resource: params.attrs.toProtoObject(params.bucket),
+ Appendable: proto.Bool(params.append),
+ }
+ // WriteObject doesn't support the generation condition, so use default.
+ if err := applyCondsProto("WriteObject", defaultGen, params.conds, spec); err != nil {
+ return nil, err
}
+
+ return &gRPCWriter{
+ buf: make([]byte, size),
+ c: c,
+ ctx: params.ctx,
+ reader: r,
+ pw: pw,
+ bucket: params.bucket,
+ attrs: params.attrs,
+ conds: params.conds,
+ spec: spec,
+ encryptionKey: params.encryptionKey,
+ settings: s,
+ progress: params.progress,
+ sendCRC32C: params.sendCRC32C,
+ forceOneShot: params.chunkSize <= 0,
+ forceEmptyContentType: params.forceEmptyContentType,
+ append: params.append,
+ setPipeWriter: setPipeWriter,
+ flushComplete: make(chan int64),
+ }, nil
}
// gRPCWriter is a wrapper around the the gRPC client-stream API that manages
// sending chunks of data provided by the user over the stream.
type gRPCWriter struct {
- c *grpcStorageClient
- buf []byte
- reader io.Reader
+ c *grpcStorageClient
+ buf []byte
+ reader io.Reader
+ pw *io.PipeWriter
+ setPipeWriter func(*io.PipeWriter) // used to set in parent storage.Writer
ctx context.Context
bucket string
attrs *ObjectAttrs
conds *Conditions
+ spec *storagepb.WriteObjectSpec
encryptionKey []byte
settings *settings
+ progress func(int64)
- sendCRC32C bool
- chunkSize int
+ sendCRC32C bool
+ forceOneShot bool
+ forceEmptyContentType bool
+ append bool
- // The gRPC client-stream used for sending buffers.
- stream storagepb.Storage_BidiWriteObjectClient
+ streamSender gRPCBidiWriteBufferSender
+ flushInProgress bool // true when the pipe is being recreated for a flush.
+ flushComplete chan int64 // use to signal back to flush call that flush to server was completed.
+}
- // The Resumable Upload ID started by a gRPC-based Writer.
- upid string
+func bucketContext(ctx context.Context, bucket string) context.Context {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(bucket))}
+ return gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
}
-// startResumableUpload initializes a Resumable Upload with gRPC and sets the
-// upload ID on the Writer.
-func (w *gRPCWriter) startResumableUpload() error {
- spec, err := w.writeObjectSpec()
- if err != nil {
- return err
+// drainInboundStream calls stream.Recv() repeatedly until an error is returned.
+// It returns the last Resource received on the stream, or nil if no Resource
+// was returned. drainInboundStream always returns a non-nil error. io.EOF
+// indicates all messages were successfully read.
+func drainInboundStream(stream storagepb.Storage_BidiWriteObjectClient) (object *storagepb.Object, err error) {
+ for err == nil {
+ var resp *storagepb.BidiWriteObjectResponse
+ resp, err = stream.Recv()
+ // GetResource() returns nil on a nil response
+ if resp.GetResource() != nil {
+ object = resp.GetResource()
+ }
}
- req := &storagepb.StartResumableWriteRequest{
- WriteObjectSpec: spec,
- CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ return object, err
+}
+
+func bidiWriteObjectRequest(buf []byte, offset int64, flush, finishWrite bool) *storagepb.BidiWriteObjectRequest {
+ return &storagepb.BidiWriteObjectRequest{
+ Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
+ ChecksummedData: &storagepb.ChecksummedData{
+ Content: buf,
+ },
+ },
+ WriteOffset: offset,
+ FinishWrite: finishWrite,
+ Flush: flush,
+ StateLookup: flush,
}
- // TODO: Currently the checksums are only sent on the request to initialize
- // the upload, but in the future, we must also support sending it
- // on the *last* message of the stream.
- req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs)
- return run(w.ctx, func(ctx context.Context) error {
- upres, err := w.c.raw.StartResumableWrite(w.ctx, req)
- w.upid = upres.GetUploadId()
- return err
- }, w.settings.retry, w.settings.idempotent)
}
-// queryProgress is a helper that queries the status of the resumable upload
-// associated with the given upload ID.
-func (w *gRPCWriter) queryProgress() (int64, error) {
- var persistedSize int64
- err := run(w.ctx, func(ctx context.Context) error {
- q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{
- UploadId: w.upid,
- })
- persistedSize = q.GetPersistedSize()
- return err
- }, w.settings.retry, true)
+type gRPCBidiWriteBufferSender interface {
+ // sendBuffer implementations should upload buf, respecting flush and
+ // finishWrite. Callers must guarantee that buf is not too long to fit in a
+ // gRPC message.
+ //
+ // If flush is true, implementations must not return until the data in buf is
+ // stable. If finishWrite is true, implementations must return the object on
+ // success.
+ sendBuffer(ctx context.Context, buf []byte, offset int64, flush, finishWrite bool) (*storagepb.Object, error)
+}
- // q.GetCommittedSize() will return 0 if q is nil.
- return persistedSize, err
+type gRPCOneshotBidiWriteBufferSender struct {
+ firstMessage *storagepb.BidiWriteObjectRequest
+ raw *gapic.Client
+ stream storagepb.Storage_BidiWriteObjectClient
+ settings *settings
}
-// uploadBuffer uploads the buffer at the given offset using a bi-directional
-// Write stream. It will open a new stream if necessary (on the first call or
-// after resuming from failure). The resulting write offset after uploading the
-// buffer is returned, as well as well as the final Object if the upload is
-// completed.
-//
-// Returns object, persisted size, and any error that is not retriable.
-func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) {
- var shouldRetry = ShouldRetry
- if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
- shouldRetry = w.settings.retry.shouldRetry
+func (w *gRPCWriter) newGRPCOneshotBidiWriteBufferSender() (*gRPCOneshotBidiWriteBufferSender, error) {
+ firstMessage := &storagepb.BidiWriteObjectRequest{
+ FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
+ WriteObjectSpec: w.spec,
+ },
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ // For a non-resumable upload, checksums must be sent in this message.
+ // TODO: Currently the checksums are only sent on the first message
+ // of the stream, but in the future, we must also support sending it
+ // on the *last* message of the stream (instead of the first).
+ ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
}
- var err error
- var lastWriteOfEntireObject bool
-
- sent := 0
- writeOffset := start
-
- toWrite := w.buf[:recvd]
+ return &gRPCOneshotBidiWriteBufferSender{
+ firstMessage: firstMessage,
+ raw: w.c.raw,
+ settings: w.settings,
+ }, nil
+}
- // Send a request with as many bytes as possible.
- // Loop until all bytes are sent.
- for {
- bytesNotYetSent := recvd - sent
- remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize
+func (s *gRPCOneshotBidiWriteBufferSender) sendBuffer(ctx context.Context, buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ var firstMessage *storagepb.BidiWriteObjectRequest
+ if s.stream == nil {
+ s.stream, err = s.raw.BidiWriteObject(ctx, s.settings.gax...)
+ if err != nil {
+ return
+ }
+ firstMessage = s.firstMessage
+ }
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if firstMessage != nil {
+ proto.Merge(req, firstMessage)
+ }
- if remainingDataFitsInSingleReq && doneReading {
- lastWriteOfEntireObject = true
+ sendErr := s.stream.Send(req)
+ if sendErr != nil {
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if sendErr != io.EOF {
+ err = sendErr
}
+ return
+ }
+ // Oneshot uploads assume all flushes succeed
- // Send the maximum amount of bytes we can, unless we don't have that many.
- bytesToSendInCurrReq := maxPerMessageWriteSize
- if remainingDataFitsInSingleReq {
- bytesToSendInCurrReq = bytesNotYetSent
+ if finishWrite {
+ s.stream.CloseSend()
+ // Oneshot uploads only read from the response stream on completion or
+ // failure
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if err == io.EOF {
+ err = nil
}
+ }
+ return
+}
- // Prepare chunk section for upload.
- data := toWrite[sent : sent+bytesToSendInCurrReq]
+type gRPCResumableBidiWriteBufferSender struct {
+ queryRetry *retryConfig
+ upid string
+ progress func(int64)
+ raw *gapic.Client
+ forceFirstMessage bool
+ stream storagepb.Storage_BidiWriteObjectClient
+ flushOffset int64
+ settings *settings
+}
- req := &storagepb.BidiWriteObjectRequest{
- Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
- ChecksummedData: &storagepb.ChecksummedData{
- Content: data,
- },
- },
- WriteOffset: writeOffset,
- FinishWrite: lastWriteOfEntireObject,
- Flush: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
- StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
- }
+func (w *gRPCWriter) newGRPCResumableBidiWriteBufferSender(ctx context.Context) (*gRPCResumableBidiWriteBufferSender, error) {
+ req := &storagepb.StartResumableWriteRequest{
+ WriteObjectSpec: w.spec,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ // TODO: Currently the checksums are only sent on the request to initialize
+ // the upload, but in the future, we must also support sending it
+ // on the *last* message of the stream.
+ ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
+ }
- // Open a new stream if necessary and set the first_message field on
- // the request. The first message on the WriteObject stream must either
- // be the Object or the Resumable Upload ID.
- if w.stream == nil {
- hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))}
- ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...)
+ var upid string
+ err := run(ctx, func(ctx context.Context) error {
+ upres, err := w.c.raw.StartResumableWrite(ctx, req, w.settings.gax...)
+ upid = upres.GetUploadId()
+ return err
+ }, w.settings.retry, w.settings.idempotent)
+ if err != nil {
+ return nil, err
+ }
- w.stream, err = w.c.raw.BidiWriteObject(ctx)
- if err != nil {
- return nil, 0, err
- }
+ // Set up an initial connection for the 0 offset, so we don't query state
+ // unnecessarily for the first buffer. If we fail, we'll just retry in the
+ // normal connect path.
+ stream, err := w.c.raw.BidiWriteObject(ctx, w.settings.gax...)
+ if err != nil {
+ stream = nil
+ }
- if w.upid != "" { // resumable upload
- req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid}
- } else { // non-resumable
- spec, err := w.writeObjectSpec()
- if err != nil {
- return nil, 0, err
- }
- req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
- WriteObjectSpec: spec,
- }
- req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey)
- // For a non-resumable upload, checksums must be sent in this message.
- // TODO: Currently the checksums are only sent on the first message
- // of the stream, but in the future, we must also support sending it
- // on the *last* message of the stream (instead of the first).
- req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs)
- }
- }
+ return &gRPCResumableBidiWriteBufferSender{
+ queryRetry: w.settings.retry,
+ upid: upid,
+ progress: w.progress,
+ raw: w.c.raw,
+ forceFirstMessage: true,
+ stream: stream,
+ settings: w.settings,
+ }, nil
+}
- err = w.stream.Send(req)
- if err == io.EOF {
- // err was io.EOF. The client-side of a stream only gets an EOF on Send
- // when the backend closes the stream and wants to return an error
- // status.
+// queryProgress is a helper that queries the status of the resumable upload
+// associated with the given upload ID.
+func (s *gRPCResumableBidiWriteBufferSender) queryProgress(ctx context.Context) (int64, error) {
+ var persistedSize int64
+ err := run(ctx, func(ctx context.Context) error {
+ q, err := s.raw.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
+ UploadId: s.upid,
+ }, s.settings.gax...)
+ // q.GetPersistedSize() will return 0 if q is nil.
+ persistedSize = q.GetPersistedSize()
+ return err
+ }, s.queryRetry, true)
- // Receive from the stream Recv() until it returns a non-nil error
- // to receive the server's status as an error. We may get multiple
- // messages before the error due to buffering.
- err = nil
- for err == nil {
- _, err = w.stream.Recv()
- }
- // Drop the stream reference as a new one will need to be created if
- // we retry.
- w.stream = nil
-
- // Drop the stream reference as a new one will need to be created if
- // we can retry the upload
- w.stream = nil
-
- // Retriable errors mean we should start over and attempt to
- // resend the entire buffer via a new stream.
- // If not retriable, falling through will return the error received.
- if shouldRetry(err) {
- // TODO: Add test case for failure modes of querying progress.
- writeOffset, err = w.determineOffset(start)
- if err != nil {
- return nil, 0, err
- }
- sent = int(writeOffset) - int(start)
+ return persistedSize, err
+}
- // Continue sending requests, opening a new stream and resending
- // any bytes not yet persisted as per QueryWriteStatus
- continue
- }
- }
+func (s *gRPCResumableBidiWriteBufferSender) sendBuffer(ctx context.Context, buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ reconnected := false
+ if s.stream == nil {
+ // Determine offset and reconnect
+ s.flushOffset, err = s.queryProgress(ctx)
if err != nil {
- return nil, 0, err
+ return
}
-
- // Update the immediate stream's sent total and the upload offset with
- // the data sent.
- sent += len(data)
- writeOffset += int64(len(data))
-
- // Not done sending data, do not attempt to commit it yet, loop around
- // and send more data.
- if recvd-sent > 0 {
- continue
+ s.stream, err = s.raw.BidiWriteObject(ctx, s.settings.gax...)
+ if err != nil {
+ return
}
+ reconnected = true
+ }
- // The buffer has been uploaded and there is still more data to be
- // uploaded, but this is not a resumable upload session. Therefore,
- // don't check persisted data.
- if !lastWriteOfEntireObject && w.chunkSize == 0 {
- return nil, writeOffset, nil
+ // clean up buf. We'll still write the message if a flush/finishWrite was
+ // requested.
+ if offset < s.flushOffset {
+ trim := s.flushOffset - offset
+ if int64(len(buf)) <= trim {
+ trim = int64(len(buf))
}
+ buf = buf[trim:]
+ }
+ if len(buf) == 0 && !flush && !finishWrite {
+ // no need to send anything
+ return nil, nil
+ }
- // Done sending the data in the buffer (remainingDataFitsInSingleReq
- // should == true if we reach this code).
- // If we are done sending the whole object, close the stream and get the final
- // object. Otherwise, receive from the stream to confirm the persisted data.
- if !lastWriteOfEntireObject {
- resp, err := w.stream.Recv()
-
- // Retriable errors mean we should start over and attempt to
- // resend the entire buffer via a new stream.
- // If not retriable, falling through will return the error received
- // from closing the stream.
- if shouldRetry(err) {
- writeOffset, err = w.determineOffset(start)
- if err != nil {
- return nil, 0, err
- }
- sent = int(writeOffset) - int(start)
-
- // Drop the stream reference as a new one will need to be created.
- w.stream = nil
-
- continue
- }
- if err != nil {
- return nil, 0, err
- }
-
- if resp.GetPersistedSize() != writeOffset {
- // Retry if not all bytes were persisted.
- writeOffset = resp.GetPersistedSize()
- sent = int(writeOffset) - int(start)
- continue
- }
- } else {
- // If the object is done uploading, close the send stream to signal
- // to the server that we are done sending so that we can receive
- // from the stream without blocking.
- err = w.stream.CloseSend()
- if err != nil {
- // CloseSend() retries the send internally. It never returns an
- // error in the current implementation, but we check it anyway in
- // case that it does in the future.
- return nil, 0, err
- }
-
- // Stream receives do not block once send is closed, but we may not
- // receive the response with the object right away; loop until we
- // receive the object or error out.
- var obj *storagepb.Object
- for obj == nil {
- resp, err := w.stream.Recv()
- if err != nil {
- return nil, 0, err
- }
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if s.forceFirstMessage || reconnected {
+ req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: s.upid}
+ s.forceFirstMessage = false
+ }
- obj = resp.GetResource()
- }
+ sendErr := s.stream.Send(req)
+ if sendErr != nil {
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if err == io.EOF {
+ // This is unexpected - we got an error on Send(), but not on Recv().
+ // Bubble up the sendErr.
+ err = sendErr
+ }
+ return
+ }
- // Even though we received the object response, continue reading
- // until we receive a non-nil error, to ensure the stream does not
- // leak even if the context isn't cancelled. See:
- // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream
- for err == nil {
- _, err = w.stream.Recv()
+ if finishWrite {
+ s.stream.CloseSend()
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if err == io.EOF {
+ err = nil
+ if obj.GetSize() > s.flushOffset {
+ s.progress(obj.GetSize())
}
-
- return obj, writeOffset, nil
}
-
- return nil, writeOffset, nil
+ return
}
-}
-// determineOffset either returns the offset given to it in the case of a simple
-// upload, or queries the write status in the case a resumable upload is being
-// used.
-func (w *gRPCWriter) determineOffset(offset int64) (int64, error) {
- // For a Resumable Upload, we must start from however much data
- // was committed.
- if w.upid != "" {
- committed, err := w.queryProgress()
+ if flush {
+ resp, err := s.stream.Recv()
if err != nil {
- return 0, err
+ return nil, err
+ }
+ persistedOffset := resp.GetPersistedSize()
+ if persistedOffset > s.flushOffset {
+ s.flushOffset = persistedOffset
+ s.progress(s.flushOffset)
}
- offset = committed
}
- return offset, nil
+ return
}
-// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's
-// ObjectAttrs and applies its Conditions. This is only used for gRPC.
-func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
- // To avoid modifying the ObjectAttrs embeded in the calling writer, deref
- // the ObjectAttrs pointer to make a copy, then assign the desired name to
- // the attribute.
- attrs := *w.attrs
+// uploadBuffer uploads the buffer at the given offset using a bi-directional
+// Write stream. It will open a new stream if necessary (on the first call or
+// after resuming from failure) and chunk the buffer per maxPerMessageWriteSize.
+// The final Object is returned on success if doneReading is true.
+//
+// Returns object and any error that is not retriable.
+func (w *gRPCWriter) uploadBuffer(ctx context.Context, recvd int, start int64, doneReading bool) (obj *storagepb.Object, err error) {
+ if w.streamSender == nil {
+ if w.append {
+ // Appendable object semantics
+ w.streamSender, err = w.newGRPCAppendBidiWriteBufferSender()
+ } else if doneReading || w.forceOneShot {
+ // One shot semantics
+ w.streamSender, err = w.newGRPCOneshotBidiWriteBufferSender()
+ } else {
+ // Resumable write semantics
+ w.streamSender, err = w.newGRPCResumableBidiWriteBufferSender(ctx)
+ }
+ if err != nil {
+ return
+ }
+ }
- spec := &storagepb.WriteObjectSpec{
- Resource: attrs.toProtoObject(w.bucket),
+ data := w.buf[:recvd]
+ offset := start
+ // We want to go through this loop at least once, in case we have to
+ // finishWrite with an empty buffer.
+ for {
+ // Send as much as we can fit into a single gRPC message. Only flush once,
+ // when sending the very last message.
+ l := maxPerMessageWriteSize
+ flush := false
+ if len(data) <= l {
+ l = len(data)
+ flush = true
+ }
+ obj, err = w.streamSender.sendBuffer(ctx, data[:l], offset, flush, flush && doneReading)
+ if err != nil {
+ return nil, err
+ }
+ data = data[l:]
+ offset += int64(l)
+ if len(data) == 0 {
+ break
+ }
}
- // WriteObject doesn't support the generation condition, so use default.
- if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
- return nil, err
+ if w.flushInProgress {
+ w.flushInProgress = false
+ w.flushComplete <- offset
}
- return spec, nil
+ return
}
// read copies the data in the reader to the given buffer and reports how much
// data was read into the buffer and if there is no more data to read (EOF).
-// Furthermore, if the attrs.ContentType is unset, the first bytes of content
-// will be sniffed for a matching content type.
+// read returns when either 1. the buffer is full, 2. Writer.Flush was called,
+// or 3. Writer.Close was called.
func (w *gRPCWriter) read() (int, bool, error) {
- if w.attrs.ContentType == "" {
- w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
- }
// Set n to -1 to start the Read loop.
var n, recvd int = -1, 0
var err error
@@ -1867,12 +2980,37 @@ func (w *gRPCWriter) read() (int, bool, error) {
}
var done bool
if err == io.EOF {
- done = true
err = nil
+ // EOF can come from Writer.Flush or Writer.Close.
+ if w.flushInProgress {
+ // Reset pipe for additional writes after the flush.
+ pr, pw := io.Pipe()
+ w.reader = pr
+ w.pw = pw
+ w.setPipeWriter(pw)
+ } else {
+ done = true
+ }
}
return recvd, done, err
}
+// flush flushes the current buffer regardless of whether it is full or not.
+// It's the implementation for Writer.Flush.
+func (w *gRPCWriter) flush() (int64, error) {
+ if !w.append {
+ return 0, errors.New("Flush is supported only if Writer.Append is set to true")
+ }
+
+ // Close PipeWriter to trigger EOF on read side of the stream.
+ w.flushInProgress = true
+ w.pw.Close()
+
+ // Wait for flush to complete
+ offset := <-w.flushComplete
+ return offset, nil
+}
+
func checkCanceled(err error) error {
if status.Code(err) == codes.Canceled {
return context.Canceled
diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go
new file mode 100644
index 000000000..d34227334
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_dp.go
@@ -0,0 +1,22 @@
+//go:build !disable_grpc_modules
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ _ "google.golang.org/grpc/balancer/rls"
+ _ "google.golang.org/grpc/xds/googledirectpath"
+)
diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go
new file mode 100644
index 000000000..f7bebd1de
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go
@@ -0,0 +1,283 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
+ "github.com/google/uuid"
+ "go.opentelemetry.io/contrib/detectors/gcp"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats/opentelemetry"
+)
+
+const (
+ monitoredResourceName = "storage.googleapis.com/Client"
+ metricPrefix = "storage.googleapis.com/client/"
+)
+
+// Added to help with tests
+type storageMonitoredResource struct {
+ project string
+ api string
+ location string
+ instance string
+ cloudPlatform string
+ host string
+ resource *resource.Resource
+}
+
+func (smr *storageMonitoredResource) exporter() (metric.Exporter, error) {
+ exporter, err := mexporter.New(
+ mexporter.WithProjectID(smr.project),
+ mexporter.WithMetricDescriptorTypeFormatter(metricFormatter),
+ mexporter.WithCreateServiceTimeSeries(),
+ mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"}),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("storage: creating metrics exporter: %w", err)
+ }
+ return exporter, nil
+}
+
+func newStorageMonitoredResource(ctx context.Context, project, api string, opts ...resource.Option) (*storageMonitoredResource, error) {
+ detectedAttrs, err := resource.New(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ smr := &storageMonitoredResource{
+ instance: uuid.New().String(),
+ api: api,
+ project: project,
+ }
+ s := detectedAttrs.Set()
+ // Attempt to use resource detector project id if project id wasn't
+ // identified using ADC as a last resort. Otherwise metrics cannot be started.
+ if p, present := s.Value("cloud.account.id"); present && smr.project == "" {
+ smr.project = p.AsString()
+ } else if !present && smr.project == "" {
+ return nil, errors.New("google cloud project is required to start client-side metrics")
+ }
+ if v, ok := s.Value("cloud.region"); ok {
+ smr.location = v.AsString()
+ } else {
+ smr.location = "global"
+ }
+ if v, ok := s.Value("cloud.platform"); ok {
+ smr.cloudPlatform = v.AsString()
+ } else {
+ smr.cloudPlatform = "unknown"
+ }
+ if v, ok := s.Value("host.id"); ok {
+ smr.host = v.AsString()
+ } else if v, ok := s.Value("faas.id"); ok {
+ smr.host = v.AsString()
+ } else {
+ smr.host = "unknown"
+ }
+ smr.resource, err = resource.New(ctx, resource.WithAttributes([]attribute.KeyValue{
+ {Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)},
+ {Key: "project_id", Value: attribute.StringValue(smr.project)},
+ {Key: "api", Value: attribute.StringValue(smr.api)},
+ {Key: "instance_id", Value: attribute.StringValue(smr.instance)},
+ {Key: "location", Value: attribute.StringValue(smr.location)},
+ {Key: "cloud_platform", Value: attribute.StringValue(smr.cloudPlatform)},
+ {Key: "host_id", Value: attribute.StringValue(smr.host)},
+ }...))
+ if err != nil {
+ return nil, err
+ }
+ return smr, nil
+}
+
+type metricsContext struct {
+ // client options passed to gRPC channels
+ clientOpts []option.ClientOption
+ // instance of metric reader used by gRPC client-side metrics
+ provider *metric.MeterProvider
+ // clean func to call when closing gRPC client
+ close func()
+}
+
+type metricsConfig struct {
+ project string
+ interval time.Duration
+ customExporter *metric.Exporter
+ manualReader *metric.ManualReader // used by tests
+ disableExporter bool // used by tests disables exports
+ resourceOpts []resource.Option // used by tests
+}
+
+func newGRPCMetricContext(ctx context.Context, cfg metricsConfig) (*metricsContext, error) {
+ var exporter metric.Exporter
+ meterOpts := []metric.Option{}
+ if cfg.customExporter == nil {
+ var ropts []resource.Option
+ if cfg.resourceOpts != nil {
+ ropts = cfg.resourceOpts
+ } else {
+ ropts = []resource.Option{resource.WithDetectors(gcp.NewDetector())}
+ }
+ smr, err := newStorageMonitoredResource(ctx, cfg.project, "grpc", ropts...)
+ if err != nil {
+ return nil, err
+ }
+ exporter, err = smr.exporter()
+ if err != nil {
+ return nil, err
+ }
+ meterOpts = append(meterOpts, metric.WithResource(smr.resource))
+ } else {
+ exporter = *cfg.customExporter
+ }
+ interval := time.Minute
+ if cfg.interval > 0 {
+ interval = cfg.interval
+ }
+ meterOpts = append(meterOpts,
+ // Metric views update histogram boundaries to be relevant to GCS
+ // otherwise default OTel histogram boundaries are used.
+ metric.WithView(
+ createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries())),
+ )
+ if cfg.manualReader != nil {
+ meterOpts = append(meterOpts, metric.WithReader(cfg.manualReader))
+ }
+ if !cfg.disableExporter {
+ meterOpts = append(meterOpts, metric.WithReader(
+ metric.NewPeriodicReader(&exporterLogSuppressor{Exporter: exporter}, metric.WithInterval(interval))))
+ }
+ provider := metric.NewMeterProvider(meterOpts...)
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: stats.NewMetrics(
+ "grpc.client.attempt.started",
+ "grpc.client.attempt.duration",
+ "grpc.client.attempt.sent_total_compressed_message_size",
+ "grpc.client.attempt.rcvd_total_compressed_message_size",
+ "grpc.client.call.duration",
+ "grpc.lb.wrr.rr_fallback",
+ "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ "grpc.lb.wrr.endpoint_weight_stale",
+ "grpc.lb.wrr.endpoint_weights",
+ "grpc.lb.rls.cache_entries",
+ "grpc.lb.rls.cache_size",
+ "grpc.lb.rls.default_target_picks",
+ "grpc.lb.rls.target_picks",
+ "grpc.lb.rls.failed_picks",
+ ),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ opts := []option.ClientOption{
+ option.WithGRPCDialOption(
+ opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})),
+ option.WithGRPCDialOption(
+ grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})),
+ }
+ return &metricsContext{
+ clientOpts: opts,
+ provider: provider,
+ close: func() {
+ provider.Shutdown(ctx)
+ },
+ }, nil
+}
+
+// Silences permission errors after initial error is emitted to prevent
+// chatty logs.
+type exporterLogSuppressor struct {
+ metric.Exporter
+ emittedFailure bool
+}
+
+// Implements OTel SDK metric.Exporter interface to prevent noisy logs from
+// lack of credentials after initial failure.
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter
+func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if err := e.Exporter.Export(ctx, rm); err != nil && !e.emittedFailure {
+ if strings.Contains(err.Error(), "PermissionDenied") {
+ e.emittedFailure = true
+ return fmt.Errorf("gRPC metrics failed due permission issue: %w", err)
+ }
+ return err
+ }
+ return nil
+}
+
+func latencyHistogramBoundaries() []float64 {
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 0.002
+ // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range
+ for i := 0; i < 50; i++ {
+ boundaries = append(boundaries, boundary)
+ // increment by 2ms
+ boundary += increment
+ }
+ // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes
+ for i := 0; i < 150 && boundary < 300; i++ {
+ boundaries = append(boundaries, boundary)
+ if i != 0 && i%10 == 0 {
+ increment *= 2
+ }
+ boundary += increment
+ }
+ return boundaries
+}
+
+func sizeHistogramBoundaries() []float64 {
+ kb := 1024.0
+ mb := 1024.0 * kb
+ gb := 1024.0 * mb
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 128 * kb
+ // 128 KiB increments up to 4MiB, then exponential growth
+ for len(boundaries) < 200 && boundary <= 16*gb {
+ boundaries = append(boundaries, boundary)
+ boundary += increment
+ if boundary >= 4*mb {
+ increment *= 2
+ }
+ }
+ return boundaries
+}
+
+func createHistogramView(name string, boundaries []float64) metric.View {
+ return metric.NewView(metric.Instrument{
+ Name: name,
+ Kind: metric.InstrumentKindHistogram,
+ }, metric.Stream{
+ Name: name,
+ Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries},
+ })
+}
+
+func metricFormatter(m metricdata.Metrics) string {
+ return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_reader.go b/vendor/cloud.google.com/go/storage/grpc_reader.go
new file mode 100644
index 000000000..eaa35fea6
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_reader.go
@@ -0,0 +1,865 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+
+ "cloud.google.com/go/internal/trace"
+ "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+)
+
+// Below is the legacy implementation of gRPC downloads using the ReadObject API.
+// It's used by gRPC if the experimental option WithGRPCBidiReads was not passed.
+// TODO: once BidiReadObject is in GA, remove this implementation.
+
+// Custom codec to be used for unmarshaling ReadObjectResponse messages.
+// This is used to avoid a copy of object data in proto.Unmarshal.
+type bytesCodecReadObject struct {
+}
+
+var _ encoding.CodecV2 = bytesCodecReadObject{}
+
+// Marshal is used to encode messages to send for bytesCodecReadObject. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecReadObject) Marshal(v any) (mem.BufferSlice, error) {
+ vv, ok := v.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ }
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
+}
+
+// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecReadObject) Unmarshal(data mem.BufferSlice, v any) error {
+ switch v := v.(type) {
+ case *mem.BufferSlice:
+ *v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
+ return nil
+ case proto.Message:
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
+ default:
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
+ }
+}
+
+func (bytesCodecReadObject) Name() string {
+ return ""
+}
+
+func (c *grpcStorageClient) NewRangeReaderReadObject(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReaderReadObject")
+ defer func() { trace.EndSpan(ctx, err) }()
+
+ s := callSettings(c.settings, opts...)
+
+ s.gax = append(s.gax, gax.WithGRPCOptions(
+ grpc.ForceCodecV2(bytesCodecReadObject{}),
+ ))
+
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
+ }
+
+ b := bucketResourceName(globalProjectAlias, params.bucket)
+ req := &storagepb.ReadObjectRequest{
+ Bucket: b,
+ Object: params.object,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
+ }
+ // The default is a negative value, which means latest.
+ if params.gen >= 0 {
+ req.Generation = params.gen
+ }
+
+ // Define a function that initiates a Read with offset and length, assuming
+ // we have already read seen bytes.
+ reopen := func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error) {
+ // If the context has already expired, return immediately without making
+ // we call.
+ if err := ctx.Err(); err != nil {
+ return nil, nil, err
+ }
+
+ cc, cancel := context.WithCancel(ctx)
+
+ req.ReadOffset = params.offset + seen
+
+ // Only set a ReadLimit if length is greater than zero, because <= 0 means
+ // to read it all.
+ if params.length > 0 {
+ req.ReadLimit = params.length - seen
+ }
+
+ if err := applyCondsProto("gRPCReadObjectReader.reopen", params.gen, params.conds, req); err != nil {
+ cancel()
+ return nil, nil, err
+ }
+
+ var stream storagepb.Storage_ReadObjectClient
+ var err error
+ var decoder *readObjectResponseDecoder
+
+ err = run(cc, func(ctx context.Context) error {
+ stream, err = c.raw.ReadObject(ctx, req, s.gax...)
+ if err != nil {
+ return err
+ }
+
+ // Receive the message into databuf as a wire-encoded message so we can
+ // use a custom decoder to avoid an extra copy at the protobuf layer.
+ databufs := mem.BufferSlice{}
+ err := stream.RecvMsg(&databufs)
+ if err != nil {
+ // NotFound types of errors show up on the Recv call, rather than the
+ // initialization of the stream via ReadObject above.
+ return formatObjectErr(err)
+ }
+ // Use a custom decoder that uses protobuf unmarshalling for all
+ // fields except the object data. Object data is handled separately
+ // to avoid a copy.
+ decoder = &readObjectResponseDecoder{
+ databufs: databufs,
+ }
+ err = decoder.readFullObjectResponse()
+ return err
+ }, s.retry, s.idempotent)
+ if err != nil {
+ // Close the stream context we just created to ensure we don't leak
+ // resources.
+ cancel()
+ // Free any buffers.
+ if decoder != nil && decoder.databufs != nil {
+ decoder.databufs.Free()
+ }
+ return nil, nil, err
+ }
+
+ return &readStreamResponseReadObject{stream, decoder}, cancel, nil
+ }
+
+ res, cancel, err := reopen(0)
+ if err != nil {
+ return nil, err
+ }
+
+ // The first message was Recv'd on stream open, use it to populate the
+ // object metadata.
+ msg := res.decoder.msg
+ obj := msg.GetMetadata()
+ // This is the size of the entire object, even if only a range was requested.
+ size := obj.GetSize()
+
+ // Only support checksums when reading an entire object, not a range.
+ var (
+ wantCRC uint32
+ checkCRC bool
+ )
+ if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil {
+ if params.offset == 0 && params.length < 0 {
+ checkCRC = true
+ }
+ wantCRC = checksums.GetCrc32C()
+ }
+
+ metadata := obj.GetMetadata()
+ r = &Reader{
+ Attrs: ReaderObjectAttrs{
+ Size: size,
+ ContentType: obj.GetContentType(),
+ ContentEncoding: obj.GetContentEncoding(),
+ CacheControl: obj.GetCacheControl(),
+ LastModified: obj.GetUpdateTime().AsTime(),
+ Metageneration: obj.GetMetageneration(),
+ Generation: obj.GetGeneration(),
+ CRC32C: wantCRC,
+ },
+ objectMetadata: &metadata,
+ reader: &gRPCReadObjectReader{
+ stream: res.stream,
+ reopen: reopen,
+ cancel: cancel,
+ size: size,
+ // Preserve the decoder to read out object data when Read/WriteTo is called.
+ currMsg: res.decoder,
+ settings: s,
+ zeroRange: params.length == 0,
+ wantCRC: wantCRC,
+ checkCRC: checkCRC,
+ },
+ checkCRC: checkCRC,
+ }
+
+ cr := msg.GetContentRange()
+ if cr != nil {
+ r.Attrs.StartOffset = cr.GetStart()
+ r.remain = cr.GetEnd() - cr.GetStart()
+ } else {
+ r.remain = size
+ }
+
+ // For a zero-length request, explicitly close the stream and set remaining
+ // bytes to zero.
+ if params.length == 0 {
+ r.remain = 0
+ r.reader.Close()
+ }
+
+ return r, nil
+}
+
+type readStreamResponseReadObject struct {
+ stream storagepb.Storage_ReadObjectClient
+ decoder *readObjectResponseDecoder
+}
+
+type gRPCReadObjectReader struct {
+ seen, size int64
+ zeroRange bool
+ stream storagepb.Storage_ReadObjectClient
+ reopen func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error)
+ leftovers []byte
+ currMsg *readObjectResponseDecoder // decoder for the current message
+ cancel context.CancelFunc
+ settings *settings
+ checkCRC bool // should we check the CRC?
+ wantCRC uint32 // the CRC32c value the server sent in the header
+ gotCRC uint32 // running crc
+}
+
+// Update the running CRC with the data in the slice, if CRC checking was enabled.
+func (r *gRPCReadObjectReader) updateCRC(b []byte) {
+ if r.checkCRC {
+ r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
+ }
+}
+
+// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
+func (r *gRPCReadObjectReader) runCRCCheck() error {
+ if r.checkCRC && r.gotCRC != r.wantCRC {
+ return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
+ }
+ return nil
+}
+
+// Read reads bytes into the user's buffer from an open gRPC stream.
+func (r *gRPCReadObjectReader) Read(p []byte) (int, error) {
+ // The entire object has been read by this reader, check the checksum if
+ // necessary and return EOF.
+ if r.size == r.seen || r.zeroRange {
+ if err := r.runCRCCheck(); err != nil {
+ return 0, err
+ }
+ return 0, io.EOF
+ }
+
+ // No stream to read from, either never initialized or Close was called.
+ // Note: There is a potential concurrency issue if multiple routines are
+ // using the same reader. One encounters an error and the stream is closed
+ // and then reopened while the other routine attempts to read from it.
+ if r.stream == nil {
+ return 0, fmt.Errorf("storage: reader has been closed")
+ }
+
+ var n int
+
+ // If there is data remaining in the current message, return what was
+ // available to conform to the Reader
+ // interface: https://pkg.go.dev/io#Reader.
+ if !r.currMsg.done {
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
+ return n, nil
+ }
+
+ // Attempt to Recv the next message on the stream.
+ // This will update r.currMsg with the decoder for the new message.
+ err := r.recv()
+ if err != nil {
+ return 0, err
+ }
+
+ // TODO: Determine if we need to capture incremental CRC32C for this
+ // chunk. The Object CRC32C checksum is captured when directed to read
+ // the entire Object. If directed to read a range, we may need to
+ // calculate the range's checksum for verification if the checksum is
+ // present in the response here.
+ // TODO: Figure out if we need to support decompressive transcoding
+ // https://cloud.google.com/storage/docs/transcoding.
+
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
+ return n, nil
+}
+
+// WriteTo writes all the data requested by the Reader into w, implementing
+// io.WriterTo.
+func (r *gRPCReadObjectReader) WriteTo(w io.Writer) (int64, error) {
+ // The entire object has been read by this reader, check the checksum if
+ // necessary and return nil.
+ if r.size == r.seen || r.zeroRange {
+ if err := r.runCRCCheck(); err != nil {
+ return 0, err
+ }
+ return 0, nil
+ }
+
+ // No stream to read from, either never initialized or Close was called.
+ // Note: There is a potential concurrency issue if multiple routines are
+ // using the same reader. One encounters an error and the stream is closed
+ // and then reopened while the other routine attempts to read from it.
+ if r.stream == nil {
+ return 0, fmt.Errorf("storage: reader has been closed")
+ }
+
+ // Track bytes written during before call.
+ var alreadySeen = r.seen
+
+ // Write any already received message to the stream. There will be some leftovers from the
+ // original NewRangeReaderReadObject call.
+ if r.currMsg != nil && !r.currMsg.done {
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(written)
+ r.currMsg = nil
+ if err != nil {
+ return r.seen - alreadySeen, err
+ }
+ }
+
+ // Loop and receive additional messages until the entire data is written.
+ for {
+ // Attempt to receive the next message on the stream.
+ // Will terminate with io.EOF once data has all come through.
+ // recv() handles stream reopening and retry logic so no need for retries here.
+ err := r.recv()
+ if err != nil {
+ if err == io.EOF {
+ // We are done; check the checksum if necessary and return.
+ err = r.runCRCCheck()
+ }
+ return r.seen - alreadySeen, err
+ }
+
+ // TODO: Determine if we need to capture incremental CRC32C for this
+ // chunk. The Object CRC32C checksum is captured when directed to read
+ // the entire Object. If directed to read a range, we may need to
+ // calculate the range's checksum for verification if the checksum is
+ // present in the response here.
+ // TODO: Figure out if we need to support decompressive transcoding
+ // https://cloud.google.com/storage/docs/transcoding.
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(written)
+ if err != nil {
+ return r.seen - alreadySeen, err
+ }
+ }
+
+}
+
+// Close cancels the read stream's context in order for it to be closed and
+// collected, and frees any currently in use buffers.
+func (r *gRPCReadObjectReader) Close() error {
+ if r.cancel != nil {
+ r.cancel()
+ }
+ r.stream = nil
+ r.currMsg = nil
+ return nil
+}
+
+// recv attempts to Recv the next message on the stream and extract the object
+// data that it contains. In the event that a retryable error is encountered,
+// the stream will be closed, reopened, and RecvMsg again.
+// This will attempt to Recv until one of the following is true:
+//
+// * Recv is successful
+// * A non-retryable error is encountered
+// * The Reader's context is canceled
+//
+// The last error received is the one that is returned, which could be from
+// an attempt to reopen the stream.
+func (r *gRPCReadObjectReader) recv() error {
+ databufs := mem.BufferSlice{}
+ err := r.stream.RecvMsg(&databufs)
+
+ var shouldRetry = ShouldRetry
+ if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
+ shouldRetry = r.settings.retry.shouldRetry
+ }
+ if err != nil && shouldRetry(err) {
+ // This will "close" the existing stream and immediately attempt to
+ // reopen the stream, but will backoff if further attempts are necessary.
+ // Reopening the stream Recvs the first message, so if retrying is
+ // successful, r.currMsg will be updated to include the new data.
+ return r.reopenStream()
+ }
+
+ if err != nil {
+ return err
+ }
+
+ r.currMsg = &readObjectResponseDecoder{databufs: databufs}
+ return r.currMsg.readFullObjectResponse()
+}
+
+// ReadObjectResponse field and subfield numbers.
+const (
+ checksummedDataFieldReadObject = protowire.Number(1)
+ checksummedDataContentFieldReadObject = protowire.Number(1)
+ checksummedDataCRC32CFieldReadObject = protowire.Number(2)
+ objectChecksumsFieldReadObject = protowire.Number(2)
+ contentRangeFieldReadObject = protowire.Number(3)
+ metadataFieldReadObject = protowire.Number(4)
+)
+
+// readObjectResponseDecoder is a wrapper on the raw message, used to decode one message
+// without copying object data. It also has methods to write out the resulting object
+// data to the user application.
+type readObjectResponseDecoder struct {
+ databufs mem.BufferSlice // raw bytes of the message being processed
+ // Decoding offsets
+ off uint64 // offset in the messsage relative to the data as a whole
+ currBuf int // index of the current buffer being processed
+ currOff uint64 // offset in the current buffer
+ // Processed data
+ msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated
+ dataOffsets bufferSliceOffsetsReadObject // offsets of the object data in the message.
+ done bool // true if the data has been completely read.
+}
+
+type bufferSliceOffsetsReadObject struct {
+ startBuf, endBuf int // indices of start and end buffers of object data in the msg
+ startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
+ currBuf int // index of current buffer being read out to the user application.
+ currOff uint64 // offset of read in current buffer.
+}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if the distance is split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readObjectResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
+// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the
+// buffers overall. Does not advance offsets.
+func (d *readObjectResponseDecoder) copyNextBytes(n int) []byte {
+ remaining := n
+ if r := d.databufs.Len() - int(d.off); r < remaining {
+ remaining = r
+ }
+ currBuf := d.currBuf
+ currOff := d.currOff
+ var buf []byte
+ for remaining > 0 {
+ b := d.databufs[currBuf].ReadOnlyData()
+ remainingInCurr := len(b[currOff:])
+ if remainingInCurr < remaining {
+ buf = append(buf, b[currOff:]...)
+ remaining -= remainingInCurr
+ currBuf++
+ currOff = 0
+ } else {
+ buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
+ remaining = 0
+ }
+ }
+ return buf
+}
+
+// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
+// go past the end of the data.
+func (d *readObjectResponseDecoder) advanceOffset(n uint64) error {
+ remaining := n
+ for remaining > 0 {
+ remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
+ if remainingInCurr <= remaining {
+ remaining -= remainingInCurr
+ d.currBuf++
+ d.currOff = 0
+ } else {
+ d.currOff += remaining
+ remaining = 0
+ }
+ }
+ // If we have advanced past the end of the buffers, something went wrong.
+ if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
+ return errors.New("decoding: truncated message, cannot advance offset")
+ }
+ d.off += n
+ return nil
+
+}
+
+// This copies object data from the message into the buffer and returns the number of
+// bytes copied. The data offsets are incremented in the message. The updateCRC
+// function is called on the copied bytes.
+func (d *readObjectResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0
+ }
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ n := copy(p, b)
+ updateCRC(b[:n])
+ d.dataOffsets.currOff += uint64(n)
+
+ // We've read all the data from this message. Free the underlying buffers.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
+ d.done = true
+ d.databufs.Free()
+ }
+ // We are at the end of the current buffer
+ if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
+ d.dataOffsets.currOff = 0
+ d.dataOffsets.currBuf++
+ }
+ return n
+}
+
+func (d *readObjectResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0, nil
+ }
+ var written int64
+ for !d.done {
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ var n int
+ // Write all remaining data from the current buffer
+ n, err := w.Write(b)
+ written += int64(n)
+ updateCRC(b)
+ if err != nil {
+ return written, err
+ }
+ d.dataOffsets.currOff = 0
+ // We've read all the data from this message.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ d.done = true
+ d.databufs.Free()
+ } else {
+ d.dataOffsets.currBuf++
+ }
+ }
+ return written, nil
+}
+
+// Consume the next available tag in the input data and return the field number and type.
+// Advances the relevant offsets in the data.
+func (d *readObjectResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
+ if tagLength < 0 {
+ return 0, 0, protowire.ParseError(tagLength)
+ }
+ // Update the offsets and current buffer depending on the tag length.
+ if err := d.advanceOffset(uint64(tagLength)); err != nil {
+ return 0, 0, fmt.Errorf("consuming tag: %w", err)
+ }
+ return fieldNum, fieldType, nil
+}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readObjectResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readObjectResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readObjectResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume any field values up to the end offset provided and don't return anything.
+// This is used to skip any values which are not going to be used.
+// msgEndOff is indexed in terms of the overall data across all buffers.
+func (d *readObjectResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+ // reimplement protowire.ConsumeFieldValue without the extra case for groups (which
+ // are are complicted and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
+ if err != nil {
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
+ }
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readObjectResponseDecoder) consumeBytes() (bufferSliceOffsetsReadObject, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return bufferSliceOffsetsReadObject{}, fmt.Errorf("consuming bytes field: %w", err)
+ }
+ offsets := bufferSliceOffsetsReadObject{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
+ }
+
+ // Advance offsets to lengths of bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
+
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readObjectResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
+}
+
+// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
+// wire-encoded message buffer b, or an error if the message is invalid.
+// This must be used on the first recv of an object as it may contain all fields
+// of ReadObjectResponse, and we use or pass on those fields to the user.
+// This function is essentially identical to proto.Unmarshal, except it aliases
+// the data in the input []byte. If the proto library adds a feature to
+// Unmarshal that does that, this function can be dropped.
+func (d *readObjectResponseDecoder) readFullObjectResponse() error {
+ msg := &storagepb.ReadObjectResponse{}
+
+ // Loop over the entire message, extracting fields as we go. This does not
+ // handle field concatenation, in which the contents of a single field
+ // are split across multiple protobuf tags.
+ for d.off < uint64(d.databufs.Len()) {
+ fieldNum, fieldType, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming next tag: %w", err)
+ }
+
+ // Unmarshal the field according to its type. Only fields that are not
+ // nil will be present.
+ switch {
+ case fieldNum == checksummedDataFieldReadObject && fieldType == protowire.BytesType:
+ // The ChecksummedData field was found. Initialize the struct.
+ msg.ChecksummedData = &storagepb.ChecksummedData{}
+
+ bytesFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
+ }
+
+ var contentEndOff = d.off + bytesFieldLen
+ for d.off < contentEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming checksummedData tag: %w", err)
+ }
+
+ switch {
+ case gotNum == checksummedDataContentFieldReadObject && gotTyp == protowire.BytesType:
+ // Get the offsets of the content bytes.
+ d.dataOffsets, err = d.consumeBytes()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err)
+ }
+ case gotNum == checksummedDataCRC32CFieldReadObject && gotTyp == protowire.Fixed32Type:
+ v, err := d.consumeFixed32()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err)
+ }
+ msg.ChecksummedData.Crc32C = &v
+ default:
+ err := d.consumeFieldValue(gotNum, gotTyp)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err)
+ }
+ }
+ }
+ case fieldNum == objectChecksumsFieldReadObject && fieldType == protowire.BytesType:
+ // The field was found. Initialize the struct.
+ msg.ObjectChecksums = &storagepb.ObjectChecksums{}
+ // Consume the bytes and copy them into a single buffer if they are split across buffers.
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err)
+ }
+ // Unmarshal.
+ if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil {
+ return err
+ }
+ case fieldNum == contentRangeFieldReadObject && fieldType == protowire.BytesType:
+ msg.ContentRange = &storagepb.ContentRange{}
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err)
+ }
+ if err := proto.Unmarshal(buf, msg.ContentRange); err != nil {
+ return err
+ }
+ case fieldNum == metadataFieldReadObject && fieldType == protowire.BytesType:
+ msg.Metadata = &storagepb.Object{}
+
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err)
+ }
+
+ if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
+ return err
+ }
+ default:
+ err := d.consumeFieldValue(fieldNum, fieldType)
+ if err != nil {
+ return fmt.Errorf("invalid field in ReadObjectResponse: %w", err)
+ }
+ }
+ }
+ d.msg = msg
+ return nil
+}
+
+// reopenStream "closes" the existing stream and attempts to reopen a stream and
+// sets the Reader's stream and cancelStream properties in the process.
+func (r *gRPCReadObjectReader) reopenStream() error {
+ // Close existing stream and initialize new stream with updated offset.
+ r.Close()
+
+ res, cancel, err := r.reopen(r.seen)
+ if err != nil {
+ return err
+ }
+ r.stream = res.stream
+ r.currMsg = res.decoder
+ r.cancel = cancel
+ return nil
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_writer.go b/vendor/cloud.google.com/go/storage/grpc_writer.go
new file mode 100644
index 000000000..2047cd23f
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_writer.go
@@ -0,0 +1,317 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ gapic "cloud.google.com/go/storage/internal/apiv2"
+ "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+const defaultWriteChunkRetryDeadline = 32 * time.Second
+
+type gRPCAppendBidiWriteBufferSender struct {
+ bucket string
+ routingToken *string
+ raw *gapic.Client
+ settings *settings
+ stream storagepb.Storage_BidiWriteObjectClient
+ firstMessage *storagepb.BidiWriteObjectRequest
+ objectChecksums *storagepb.ObjectChecksums
+
+ forceFirstMessage bool
+ progress func(int64)
+ flushOffset int64
+
+ // Fields used to report responses from the receive side of the stream
+ // recvs is closed when the current recv goroutine is complete. recvErr is set
+ // to the result of that stream (including io.EOF to indicate success)
+ recvs <-chan *storagepb.BidiWriteObjectResponse
+ recvErr error
+}
+
+func (w *gRPCWriter) newGRPCAppendBidiWriteBufferSender() (*gRPCAppendBidiWriteBufferSender, error) {
+ s := &gRPCAppendBidiWriteBufferSender{
+ bucket: w.spec.GetResource().GetBucket(),
+ raw: w.c.raw,
+ settings: w.c.settings,
+ firstMessage: &storagepb.BidiWriteObjectRequest{
+ FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
+ WriteObjectSpec: w.spec,
+ },
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ },
+ objectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
+ forceFirstMessage: true,
+ progress: w.progress,
+ }
+ return s, nil
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) connect(ctx context.Context) (err error) {
+ err = func() error {
+ // If this is a forced first message, we've already determined it's safe to
+ // send.
+ if s.forceFirstMessage {
+ s.forceFirstMessage = false
+ return nil
+ }
+
+ // It's always ok to reconnect if there is a handle. This is the common
+ // case.
+ if s.firstMessage.GetAppendObjectSpec().GetWriteHandle() != nil {
+ return nil
+ }
+
+ // We can also reconnect if the first message has an if_generation_match or
+ // if_metageneration_match condition. Note that negative conditions like
+ // if_generation_not_match are not necessarily safe to retry.
+ aos := s.firstMessage.GetAppendObjectSpec()
+ wos := s.firstMessage.GetWriteObjectSpec()
+
+ if aos != nil && aos.IfMetagenerationMatch != nil {
+ return nil
+ }
+
+ if wos != nil && wos.IfGenerationMatch != nil {
+ return nil
+ }
+ if wos != nil && wos.IfMetagenerationMatch != nil {
+ return nil
+ }
+
+ // Otherwise, it is not safe to reconnect.
+ return errors.New("cannot safely reconnect; no write handle or preconditions")
+ }()
+ if err != nil {
+ return err
+ }
+
+ return s.startReceiver(ctx)
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) withRequestParams(ctx context.Context) context.Context {
+ param := fmt.Sprintf("appendable=true&bucket=%s", s.bucket)
+ if s.routingToken != nil {
+ param = param + fmt.Sprintf("&routing_token=%s", *s.routingToken)
+ }
+ return gax.InsertMetadataIntoOutgoingContext(ctx, "x-goog-request-params", param)
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) startReceiver(ctx context.Context) (err error) {
+ s.stream, err = s.raw.BidiWriteObject(s.withRequestParams(ctx), s.settings.gax...)
+ if err != nil {
+ return
+ }
+
+ recvs := make(chan *storagepb.BidiWriteObjectResponse)
+ s.recvs = recvs
+ s.recvErr = nil
+ go s.receiveMessages(recvs)
+ return
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) ensureFirstMessageAppendObjectSpec() {
+ if s.firstMessage.GetWriteObjectSpec() != nil {
+ w := s.firstMessage.GetWriteObjectSpec()
+ s.firstMessage.FirstMessage = &storagepb.BidiWriteObjectRequest_AppendObjectSpec{
+ AppendObjectSpec: &storagepb.AppendObjectSpec{
+ Bucket: w.GetResource().GetBucket(),
+ Object: w.GetResource().GetName(),
+ IfMetagenerationMatch: w.IfMetagenerationMatch,
+ IfMetagenerationNotMatch: w.IfMetagenerationNotMatch,
+ },
+ }
+ }
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) maybeUpdateFirstMessage(resp *storagepb.BidiWriteObjectResponse) {
+ // Any affirmative response should switch us to an AppendObjectSpec.
+ s.ensureFirstMessageAppendObjectSpec()
+
+ if r := resp.GetResource(); r != nil {
+ aos := s.firstMessage.GetAppendObjectSpec()
+ aos.Bucket = r.GetBucket()
+ aos.Object = r.GetName()
+ aos.Generation = r.GetGeneration()
+ }
+
+ if h := resp.GetWriteHandle(); h != nil {
+ s.firstMessage.GetAppendObjectSpec().WriteHandle = h
+ }
+}
+
+type bidiWriteObjectRedirectionError struct{}
+
+func (e bidiWriteObjectRedirectionError) Error() string {
+ return "BidiWriteObjectRedirectedError"
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) handleRedirectionError(e *storagepb.BidiWriteObjectRedirectedError) bool {
+ if e.RoutingToken == nil {
+ // This shouldn't happen, but we don't want to blindly retry here. Instead,
+ // surface the error to the caller.
+ return false
+ }
+
+ if e.WriteHandle != nil {
+ // If we get back a write handle, we should use it. We can only use it
+ // on an append object spec.
+ s.ensureFirstMessageAppendObjectSpec()
+ s.firstMessage.GetAppendObjectSpec().WriteHandle = e.WriteHandle
+ // Generation is meant to only come with the WriteHandle, so ignore it
+ // otherwise.
+ if e.Generation != nil {
+ s.firstMessage.GetAppendObjectSpec().Generation = e.GetGeneration()
+ }
+ }
+
+ s.routingToken = e.RoutingToken
+ return true
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) receiveMessages(resps chan<- *storagepb.BidiWriteObjectResponse) {
+ resp, err := s.stream.Recv()
+ for err == nil {
+ s.maybeUpdateFirstMessage(resp)
+
+ if resp.WriteStatus != nil {
+ // We only get a WriteStatus if this was a solicited message (either
+ // state_lookup: true or finish_write: true). Unsolicited messages may
+ // arrive to update our handle if necessary. We don't want to block on
+ // this channel write if this was an unsolicited message.
+ resps <- resp
+ }
+
+ resp, err = s.stream.Recv()
+ }
+
+ if st, ok := status.FromError(err); ok && st.Code() == codes.Aborted {
+ for _, d := range st.Details() {
+ if e, ok := d.(*storagepb.BidiWriteObjectRedirectedError); ok {
+ // If we can handle this error, replace it with the sentinel. Otherwise,
+ // report it to the user.
+ if ok := s.handleRedirectionError(e); ok {
+ err = bidiWriteObjectRedirectionError{}
+ }
+ }
+ }
+ }
+
+ // TODO: automatically reconnect on retriable recv errors, even if there are
+ // no sends occurring.
+ s.recvErr = err
+ close(resps)
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) sendOnConnectedStream(buf []byte, offset int64, flush, finishWrite, sendFirstMessage bool) (obj *storagepb.Object, err error) {
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if finishWrite {
+ // appendable objects pass checksums on the last message only
+ req.ObjectChecksums = s.objectChecksums
+ }
+ if sendFirstMessage {
+ proto.Merge(req, s.firstMessage)
+ }
+
+ if err = s.stream.Send(req); err != nil {
+ return nil, err
+ }
+
+ if finishWrite {
+ s.stream.CloseSend()
+ for resp := range s.recvs {
+ if resp.GetResource() != nil {
+ obj = resp.GetResource()
+ }
+ }
+ if s.recvErr != io.EOF {
+ return nil, s.recvErr
+ }
+ if obj.GetSize() > s.flushOffset {
+ s.flushOffset = obj.GetSize()
+ s.progress(s.flushOffset)
+ }
+ return
+ }
+
+ if flush {
+ // We don't necessarily expect multiple responses for a single flush, but
+ // this allows the server to send multiple responses if it wants to.
+ flushOffset := s.flushOffset
+ for flushOffset < offset+int64(len(buf)) {
+ resp, ok := <-s.recvs
+ if !ok {
+ return nil, s.recvErr
+ }
+ pSize := resp.GetPersistedSize()
+ rSize := resp.GetResource().GetSize()
+ if flushOffset < pSize {
+ flushOffset = pSize
+ }
+ if flushOffset < rSize {
+ flushOffset = rSize
+ }
+ }
+ if s.flushOffset < flushOffset {
+ s.flushOffset = flushOffset
+ s.progress(s.flushOffset)
+ }
+ }
+
+ return
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) sendBuffer(ctx context.Context, buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ for {
+ sendFirstMessage := false
+ if s.stream == nil {
+ sendFirstMessage = true
+ if err = s.connect(ctx); err != nil {
+ return
+ }
+ }
+
+ obj, err = s.sendOnConnectedStream(buf, offset, flush, finishWrite, sendFirstMessage)
+ if err == nil {
+ return
+ }
+
+ // await recv stream termination
+ for range s.recvs {
+ }
+ if s.recvErr != io.EOF {
+ err = s.recvErr
+ }
+ s.stream = nil
+
+ // Retry transparently on a redirection error
+ if _, ok := err.(bidiWriteObjectRedirectionError); ok {
+ s.forceFirstMessage = true
+ continue
+ }
+
+ return
+ }
+}
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
index 1b9fbe9dd..2387fd33c 100644
--- a/vendor/cloud.google.com/go/storage/hmac.go
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -20,7 +20,6 @@ import (
"fmt"
"time"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
@@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
//
// Options such as UserProjectForHMACKeys can be used to set the
// userProject to be billed against for operations.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
return hmKey, nil
}
-func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
- if pbmd == nil {
- return nil
- }
-
- return &HMACKey{
- AccessID: pbmd.GetAccessId(),
- ID: pbmd.GetId(),
- State: HMACState(pbmd.GetState()),
- ProjectID: pbmd.GetProject(),
- CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
- UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
- ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
- }
-}
-
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
+// Note: gRPC is not supported.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
@@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct {
}
// Update mutates the HMACKey referred to by accessID.
+// Note: gRPC is not supported.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
@@ -237,6 +224,7 @@ type HMACKeysIterator struct {
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+// Note: gRPC is not supported.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -272,7 +260,6 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
// TODO: Remove fetch method upon integration. This method is internalized into
// httpStorageClient.ListHMACKeys() as it is the only caller.
call := it.raw.List(it.projectID)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index 0e157e4ba..46f34769d 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -19,8 +19,10 @@ import (
"encoding/base64"
"errors"
"fmt"
+ "hash/crc32"
"io"
"io/ioutil"
+ "log"
"net/http"
"net/url"
"os"
@@ -46,13 +48,14 @@ import (
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
type httpStorageClient struct {
- creds *google.Credentials
- hc *http.Client
- xmlHost string
- raw *raw.Service
- scheme string
- settings *settings
- config *storageConfig
+ creds *google.Credentials
+ hc *http.Client
+ xmlHost string
+ raw *raw.Service
+ scheme string
+ settings *settings
+ config *storageConfig
+ dynamicReadReqStallTimeout *bucketDelayManager
}
// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
@@ -74,9 +77,10 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
// Prepend default options to avoid overriding options passed by the user.
o = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, o...)
- o = append(o, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
- o = append(o, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
-
+ o = append(o, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"),
+ internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ )
// Don't error out here. The user may have passed in their own HTTP
// client which does not auth with ADC or other common conventions.
c, err := transport.Creds(ctx, o...)
@@ -105,12 +109,12 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
// Append the emulator host as default endpoint for the user
o = append([]option.ClientOption{option.WithoutAuthentication()}, o...)
- o = append(o, internaloption.WithDefaultEndpoint(endpoint))
+ o = append(o, internaloption.WithDefaultEndpointTemplate(endpoint))
o = append(o, internaloption.WithDefaultMTLSEndpoint(endpoint))
}
s.clientOption = o
- // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
+ // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, fmt.Errorf("dialing: %w", err)
@@ -126,14 +130,29 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
}
+ var bd *bucketDelayManager
+ if config.readStallTimeoutConfig != nil {
+ drrstConfig := config.readStallTimeoutConfig
+ bd, err = newBucketDelayManager(
+ drrstConfig.TargetPercentile,
+ getDynamicReadReqIncreaseRateFromEnv(),
+ getDynamicReadReqInitialTimeoutSecFromEnv(drrstConfig.Min),
+ drrstConfig.Min,
+ defaultDynamicReqdReqMaxTimeout)
+ if err != nil {
+ return nil, fmt.Errorf("creating dynamic-delay: %w", err)
+ }
+ }
+
return &httpStorageClient{
- creds: creds,
- hc: hc,
- xmlHost: u.Host,
- raw: rawService,
- scheme: u.Scheme,
- settings: s,
- config: &config,
+ creds: creds,
+ hc: hc,
+ xmlHost: u.Host,
+ raw: rawService,
+ scheme: u.Scheme,
+ settings: s,
+ config: &config,
+ dynamicReadReqStallTimeout: bd,
}, nil
}
@@ -174,7 +193,6 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
bkt.Location = "US"
}
req := c.raw.Buckets.Insert(project, bkt)
- setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
@@ -205,7 +223,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
fetch := func(pageSize int, pageToken string) (token string, err error) {
req := c.raw.Buckets.List(it.projectID)
- setClientHeader(req.Header())
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
@@ -243,7 +260,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Delete(bucket)
- setClientHeader(req.Header())
if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
@@ -257,7 +273,6 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con
func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Get(bucket).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.GetBucket", conds, req)
if err != nil {
return nil, err
@@ -272,12 +287,8 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds
return err
}, s.retry, s.idempotent)
- var e *googleapi.Error
- if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
- return nil, ErrBucketNotExist
- }
if err != nil {
- return nil, err
+ return nil, formatBucketError(err)
}
return newBucket(resp)
}
@@ -285,7 +296,6 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
s := callSettings(c.settings, opts...)
rb := uattrs.toRawBucket()
req := c.raw.Buckets.Patch(bucket, rb).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req)
if err != nil {
return nil, err
@@ -335,7 +345,9 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
}
fetch := func(pageSize int, pageToken string) (string, error) {
req := c.raw.Objects.List(bucket)
- setClientHeader(req.Header())
+ if it.query.SoftDeleted {
+ req.SoftDeleted(it.query.SoftDeleted)
+ }
projection := it.query.Projection
if projection == ProjectionDefault {
projection = ProjectionFull
@@ -348,6 +360,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
req.Versions(it.query.Versions)
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
req.MatchGlob(it.query.MatchGlob)
+ req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes)
if selection := it.query.toFieldSelection(); selection != "" {
req.Fields("nextPageToken", googleapi.Field(selection))
}
@@ -365,11 +378,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
return err
}, s.retry, s.idempotent)
if err != nil {
- var e *googleapi.Error
- if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
- err = ErrBucketNotExist
- }
- return "", err
+ return "", formatBucketError(err)
}
for _, item := range resp.Items {
it.items = append(it.items, newObject(item))
@@ -399,37 +408,33 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str
req.UserProject(s.userProject)
}
err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent)
- var e *googleapi.Error
- if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
- return ErrObjectNotExist
- }
- return err
+ return formatObjectErr(err)
}
-func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *httpStorageClient) GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
s := callSettings(c.settings, opts...)
- req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx)
- if err := applyConds("Attrs", gen, conds, req); err != nil {
+ req := c.raw.Objects.Get(params.bucket, params.object).Projection("full").Context(ctx)
+ if err := applyConds("Attrs", params.gen, params.conds, req); err != nil {
return nil, err
}
if s.userProject != "" {
req.UserProject(s.userProject)
}
- if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil {
+ if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
return nil, err
}
+ if params.softDeleted {
+ req.SoftDeleted(params.softDeleted)
+ }
+
var obj *raw.Object
var err error
err = run(ctx, func(ctx context.Context) error {
obj, err = req.Context(ctx).Do()
return err
}, s.retry, s.idempotent)
- var e *googleapi.Error
- if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
- return nil, ErrObjectNotExist
- }
if err != nil {
- return nil, err
+ return nil, formatObjectErr(err)
}
return newObject(obj), nil
}
@@ -534,14 +539,60 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObje
var obj *raw.Object
var err error
err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent)
- var e *googleapi.Error
- if errors.As(err, &e) && e.Code == http.StatusNotFound {
- return nil, ErrObjectNotExist
+ if err != nil {
+ return nil, formatObjectErr(err)
+ }
+ return newObject(obj), nil
+}
+
+func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := c.raw.Objects.Restore(params.bucket, params.object, params.gen).Context(ctx)
+ // Do not set the generation here since it's not an optional condition; it gets set above.
+ if err := applyConds("RestoreObject", defaultGen, params.conds, req); err != nil {
+ return nil, err
+ }
+ if s.userProject != "" {
+ req.UserProject(s.userProject)
+ }
+ if params.copySourceACL {
+ req.CopySourceAcl(params.copySourceACL)
+ }
+ if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
+ return nil, err
}
+
+ var obj *raw.Object
+ var err error
+ err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
if err != nil {
+ return nil, formatObjectErr(err)
+ }
+ return newObject(obj), err
+}
+
+func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := c.raw.Objects.Move(params.bucket, params.srcObject, params.dstObject).Context(ctx)
+ if err := applyConds("MoveObjectDestination", defaultGen, params.dstConds, req); err != nil {
return nil, err
}
- return newObject(obj), nil
+ if err := applySourceConds("MoveObjectSource", defaultGen, params.srcConds, req); err != nil {
+ return nil, err
+ }
+ if s.userProject != "" {
+ req.UserProject(s.userProject)
+ }
+ if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
+ return nil, err
+ }
+ var obj *raw.Object
+ var err error
+ err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
+ if err != nil {
+ return nil, formatObjectErr(err)
+ }
+ return newObject(obj), err
}
// Default Object ACL methods.
@@ -629,7 +680,7 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
}, s.retry, s.idempotent)
}
-// configureACLCall sets the context, user project and headers on the apiary library call.
+// configureACLCall sets the context and user project on the apiary library call.
// This will panic if the call does not have the correct methods.
func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
@@ -637,7 +688,6 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H
if userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)})
}
- setClientHeader(call.Header())
}
// Object ACL methods.
@@ -723,13 +773,12 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
return nil, err
}
var obj *raw.Object
- setClientHeader(call.Header())
var err error
retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }
if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil {
- return nil, err
+ return nil, formatObjectErr(err)
}
return newObject(obj), nil
}
@@ -751,7 +800,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
return nil, err
}
- if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil {
+ if err := applySourceConds("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
return nil, err
}
if s.userProject != "" {
@@ -772,12 +821,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
var res *raw.RewriteResponse
var err error
- setClientHeader(call.Header())
retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err }
if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil {
- return nil, err
+ return nil, formatObjectErr(err)
}
r := &rewriteObjectResponse{
@@ -791,6 +839,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
+// NewMultiRangeDownloader is not supported by http client.
+func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
+ return nil, errMethodNotSupported
+}
+
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
@@ -827,17 +880,52 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
return nil, err
}
- // Set custom headers passed in via the context. This is only required for XML;
- // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
- ctxHeaders := callctx.HeadersFromContext(ctx)
- for k, vals := range ctxHeaders {
- for _, v := range vals {
- req.Header.Add(k, v)
- }
- }
-
reopen := readerReopen(ctx, req.Header, params, s,
- func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) },
+ func(ctx context.Context) (*http.Response, error) {
+ setHeadersFromCtx(ctx, req.Header)
+
+ if c.dynamicReadReqStallTimeout == nil {
+ return c.hc.Do(req.WithContext(ctx))
+ }
+
+ cancelCtx, cancel := context.WithCancel(ctx)
+ var (
+ res *http.Response
+ err error
+ )
+
+ done := make(chan bool)
+ go func() {
+ reqStartTime := time.Now()
+ res, err = c.hc.Do(req.WithContext(cancelCtx))
+ if err == nil {
+ reqLatency := time.Since(reqStartTime)
+ c.dynamicReadReqStallTimeout.update(params.bucket, reqLatency)
+ } else if errors.Is(err, context.Canceled) {
+ // context.Canceled means operation took more than current dynamicTimeout,
+ // hence should be increased.
+ c.dynamicReadReqStallTimeout.increase(params.bucket)
+ }
+ done <- true
+ }()
+
+ // Wait until stall timeout or request is successful.
+ stallTimeout := c.dynamicReadReqStallTimeout.getValue(params.bucket)
+ timer := time.After(stallTimeout)
+ select {
+ case <-timer:
+ log.Printf("stalled read-req (%p) cancelled after %fs", req, stallTimeout.Seconds())
+ cancel()
+ <-done
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
+ return res, context.DeadlineExceeded
+ case <-done:
+ cancel = nil
+ }
+ return res, err
+ },
func() error { return setConditionsHeaders(req.Header, params.conds) },
func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) })
@@ -851,7 +939,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) {
call := c.raw.Objects.Get(params.bucket, params.object)
- setClientHeader(call.Header())
call.Projection("full")
if s.userProject != "" {
@@ -874,21 +961,31 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR
}
func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
+ if params.append {
+ return nil, errors.New("storage: append not supported on HTTP Client; use gRPC")
+ }
+
s := callSettings(c.settings, opts...)
errorf := params.setError
setObj := params.setObj
progress := params.progress
attrs := params.attrs
+ params.setFlush(func() (int64, error) {
+ return 0, errors.New("Writer.Flush is only supported for gRPC-based clients")
+ })
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(params.chunkSize),
}
- if c := attrs.ContentType; c != "" {
+ if c := attrs.ContentType; c != "" || params.forceEmptyContentType {
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
}
if params.chunkRetryDeadline != 0 {
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline))
}
+ if params.chunkTransferTimeout != 0 {
+ mediaOpts = append(mediaOpts, googleapi.ChunkTransferTimeout(params.chunkTransferTimeout))
+ }
pr, pw := io.Pipe()
@@ -967,7 +1064,6 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version))
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -988,7 +1084,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
rp := iamToStoragePolicy(policy)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1002,7 +1097,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.TestIamPermissions(resource, permissions)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1051,7 +1145,6 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
call := c.raw.Projects.HmacKeys.List(project)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
@@ -1162,9 +1255,6 @@ func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, a
// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket,
// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets.
func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications")
- defer func() { trace.EndSpan(ctx, err) }()
-
s := callSettings(c.settings, opts...)
call := c.raw.Notifications.List(bucket)
if s.userProject != "" {
@@ -1182,9 +1272,6 @@ func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string
}
func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
s := callSettings(c.settings, opts...)
call := c.raw.Notifications.Insert(bucket, toRawNotification(n))
if s.userProject != "" {
@@ -1202,9 +1289,6 @@ func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket strin
}
func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
s := callSettings(c.settings, opts...)
call := c.raw.Notifications.Delete(bucket, id)
if s.userProject != "" {
@@ -1216,9 +1300,12 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin
}
type httpReader struct {
- body io.ReadCloser
- seen int64
- reopen func(seen int64) (*http.Response, error)
+ body io.ReadCloser
+ seen int64
+ reopen func(seen int64) (*http.Response, error)
+ checkCRC bool // should we check the CRC?
+ wantCRC uint32 // the CRC32c value the server sent in the header
+ gotCRC uint32 // running crc
}
func (r *httpReader) Read(p []byte) (int, error) {
@@ -1227,7 +1314,22 @@ func (r *httpReader) Read(p []byte) (int, error) {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
- if err == nil || err == io.EOF {
+ if r.checkCRC {
+ r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
+ }
+ if err == nil {
+ return n, nil
+ }
+ if err == io.EOF {
+ // Check CRC here. It would be natural to check it in Close, but
+ // everybody defers Close on the assumption that it doesn't return
+ // anything worth looking at.
+ if r.checkCRC {
+ if r.gotCRC != r.wantCRC {
+ return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
+ r.gotCRC, r.wantCRC)
+ }
+ }
return n, err
}
// Read failed (likely due to connection issues), but we will try to reopen
@@ -1291,13 +1393,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
err = run(ctx, func(ctx context.Context) error {
res, err = doDownload(ctx)
if err != nil {
- var e *googleapi.Error
- if errors.As(err, &e) {
- if e.Code == http.StatusNotFound {
- return ErrObjectNotExist
- }
- }
- return err
+ return formatObjectErr(err)
}
if res.StatusCode == http.StatusNotFound {
@@ -1306,7 +1402,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
- body, _ := ioutil.ReadAll(res.Body)
+ body, _ := io.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
@@ -1380,18 +1476,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
} else {
size = res.ContentLength
- // Check the CRC iff all of the following hold:
- // - We asked for content (length != 0).
- // - We got all the content (status != PartialContent).
- // - The server sent a CRC header.
- // - The Go http stack did not uncompress the file.
- // - We were not served compressed data that was uncompressed on download.
- // The problem with the last two cases is that the CRC will not match -- GCS
- // computes it on the compressed contents, but we compute it on the
- // uncompressed contents.
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
- crc, checkCRC = parseCRC32c(res)
- }
+ }
+
+ // Check the CRC iff all of the following hold:
+ // - We asked for content (length != 0).
+ // - We got all the content (status != PartialContent).
+ // - The server sent a CRC header.
+ // - The Go http stack did not uncompress the file.
+ // - We were not served compressed data that was uncompressed on download.
+ // The problem with the last two cases is that the CRC will not match -- GCS
+ // computes it on the compressed contents, but we compute it on the
+ // uncompressed contents.
+ crc, checkCRC = parseCRC32c(res)
+ if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) {
+ checkCRC = false
}
remain := res.ContentLength
@@ -1419,6 +1517,14 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
}
+ metadata := map[string]string{}
+ for key, values := range res.Header {
+ if len(values) > 0 && strings.HasPrefix(key, "X-Goog-Meta-") {
+ key := key[len("X-Goog-Meta-"):]
+ metadata[key] = values[0]
+ }
+ }
+
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
@@ -1428,16 +1534,47 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
StartOffset: startOffset,
Generation: params.gen,
Metageneration: metaGen,
+ CRC32C: crc,
+ Decompressed: res.Uncompressed || uncompressedByServer(res),
}
return &Reader{
- Attrs: attrs,
- size: size,
- remain: remain,
- wantCRC: crc,
- checkCRC: checkCRC,
+ Attrs: attrs,
+ objectMetadata: &metadata,
+ size: size,
+ remain: remain,
+ checkCRC: checkCRC,
reader: &httpReader{
- reopen: reopen,
- body: body,
+ reopen: reopen,
+ body: body,
+ wantCRC: crc,
+ checkCRC: checkCRC,
},
}, nil
}
+
+// setHeadersFromCtx sets custom headers passed in via the context on the header,
+// replacing any header with the same key (which avoids duplicating invocation headers).
+// This is only required for XML; for gRPC & JSON requests this is handled in
+// the GAPIC and Apiary layers respectively.
+func setHeadersFromCtx(ctx context.Context, header http.Header) {
+ ctxHeaders := callctx.HeadersFromContext(ctx)
+ for k, vals := range ctxHeaders {
+ // Merge x-goog-api-client values into a single space-separated value.
+ if strings.EqualFold(k, xGoogHeaderKey) {
+ alreadySetValues := header.Values(xGoogHeaderKey)
+ vals = append(vals, alreadySetValues...)
+
+ if len(vals) > 0 {
+ xGoogHeader := vals[0]
+ for _, v := range vals[1:] {
+ xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+ }
+ header.Set(k, xGoogHeader)
+ }
+ } else {
+ for _, v := range vals {
+ header.Set(k, v)
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go
index 4c01bff4f..9c9a3a872 100644
--- a/vendor/cloud.google.com/go/storage/iam.go
+++ b/vendor/cloud.google.com/go/storage/iam.go
@@ -19,7 +19,6 @@ import (
"cloud.google.com/go/iam"
"cloud.google.com/go/iam/apiv1/iampb"
- "cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
"google.golang.org/genproto/googleapis/type/expr"
)
@@ -45,16 +44,16 @@ func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy,
}
func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "storage.IAM.Get")
+ defer func() { endSpan(ctx, err) }()
o := makeStorageOpts(true, c.retry, c.userProject)
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "storage.IAM.Set")
+ defer func() { endSpan(ctx, err) }()
isIdempotent := len(p.Etag) > 0
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
@@ -62,8 +61,8 @@ func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "storage.IAM.Test")
+ defer func() { endSpan(ctx, err) }()
o := makeStorageOpts(true, c.retry, c.userProject)
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
index c6fd4b341..03c3f8c17 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ type BucketIterator struct {
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error)
}
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
@@ -68,100 +68,6 @@ func (it *BucketIterator) takeBuf() interface{} {
return b
}
-// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata.
-type HmacKeyMetadataIterator struct {
- items []*storagepb.HmacKeyMetadata
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) {
- var item *storagepb.HmacKeyMetadata
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *HmacKeyMetadataIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *HmacKeyMetadataIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig.
-type NotificationConfigIterator struct {
- items []*storagepb.NotificationConfig
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) {
- var item *storagepb.NotificationConfig
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *NotificationConfigIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *NotificationConfigIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
// ObjectIterator manages a stream of *storagepb.Object.
type ObjectIterator struct {
items []*storagepb.Object
@@ -182,7 +88,7 @@ type ObjectIterator struct {
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error)
}
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
new file mode 100644
index 000000000..a51532f60
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
@@ -0,0 +1,38 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package storage
+
+import (
+ "iter"
+
+ storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
index 8159589ea..502fa5678 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,17 +17,15 @@
// Package storage is an auto-generated package for the
// Cloud Storage API.
//
-// Stop. This folder is likely not what you are looking for. This folder
-// contains protocol buffer definitions for an unreleased API for accessing
-// Cloud Storage. Unless told otherwise by a Google Cloud representative, do
-// not use any of the contents of this folder. If you would like to use Cloud
-// Storage, please consult our official documentation (at
+// This folder contains protocol buffer definitions for an API only
+// accessible to select customers. Customers not participating should not
+// depend on this file. Please contact Google Cloud sales if you are
+// interested. Unless told otherwise by a Google Cloud representative, do not
+// use or otherwise rely on any of the contents of this folder. If you would
+// like to use Cloud Storage, please consult our official documentation (at
// https://cloud.google.com/storage/docs/apis) for details on our XML and
// JSON APIs, or else consider one of our client libraries (at
-// https://cloud.google.com/storage/docs/reference/libraries). This API
-// defined in this folder is unreleased and may shut off, break, or fail at
-// any time for any users who are not registered as a part of a private
-// preview program.
+// https://cloud.google.com/storage/docs/reference/libraries).
//
// # General documentation
//
@@ -45,6 +43,7 @@
//
// To get started with this package, create a client.
//
+// // go get cloud.google.com/go/storage/internal/apiv2@latest
// ctx := context.Background()
// // This snippet has been automatically generated and should be regarded as a code template only.
// // It will require modifications to work:
@@ -63,25 +62,14 @@
//
// # Using the Client
//
-// The following is an example of making an API call with the newly created client.
+// The following is an example of making an API call with the newly created client, mentioned above.
//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := storage.NewClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-// stream, err := c.BidiWriteObject(ctx)
+// stream, err := c.BidiReadObject(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// go func() {
-// reqs := []*storagepb.BidiWriteObjectRequest{
+// reqs := []*storagepb.BidiReadObjectRequest{
// // TODO: Create requests.
// }
// for _, req := range reqs {
@@ -117,34 +105,3 @@
// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
package storage // import "cloud.google.com/go/storage/internal/apiv2"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write",
- }
-}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
index 56256bb2c..7e4d99ec9 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
@@ -10,6 +10,11 @@
"grpc": {
"libraryClient": "Client",
"rpcs": {
+ "BidiReadObject": {
+ "methods": [
+ "BidiReadObject"
+ ]
+ },
"BidiWriteObject": {
"methods": [
"BidiWriteObject"
@@ -30,31 +35,11 @@
"CreateBucket"
]
},
- "CreateHmacKey": {
- "methods": [
- "CreateHmacKey"
- ]
- },
- "CreateNotificationConfig": {
- "methods": [
- "CreateNotificationConfig"
- ]
- },
"DeleteBucket": {
"methods": [
"DeleteBucket"
]
},
- "DeleteHmacKey": {
- "methods": [
- "DeleteHmacKey"
- ]
- },
- "DeleteNotificationConfig": {
- "methods": [
- "DeleteNotificationConfig"
- ]
- },
"DeleteObject": {
"methods": [
"DeleteObject"
@@ -65,46 +50,21 @@
"GetBucket"
]
},
- "GetHmacKey": {
- "methods": [
- "GetHmacKey"
- ]
- },
"GetIamPolicy": {
"methods": [
"GetIamPolicy"
]
},
- "GetNotificationConfig": {
- "methods": [
- "GetNotificationConfig"
- ]
- },
"GetObject": {
"methods": [
"GetObject"
]
},
- "GetServiceAccount": {
- "methods": [
- "GetServiceAccount"
- ]
- },
"ListBuckets": {
"methods": [
"ListBuckets"
]
},
- "ListHmacKeys": {
- "methods": [
- "ListHmacKeys"
- ]
- },
- "ListNotificationConfigs": {
- "methods": [
- "ListNotificationConfigs"
- ]
- },
"ListObjects": {
"methods": [
"ListObjects"
@@ -115,6 +75,11 @@
"LockBucketRetentionPolicy"
]
},
+ "MoveObject": {
+ "methods": [
+ "MoveObject"
+ ]
+ },
"QueryWriteStatus": {
"methods": [
"QueryWriteStatus"
@@ -155,11 +120,6 @@
"UpdateBucket"
]
},
- "UpdateHmacKey": {
- "methods": [
- "UpdateHmacKey"
- ]
- },
"UpdateObject": {
"methods": [
"UpdateObject"
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
new file mode 100644
index 000000000..0de9b31f6
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
@@ -0,0 +1,65 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package storage
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "storage.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write",
+ }
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
index 648199506..4a50254d8 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package storage
import (
"context"
"fmt"
+ "log/slog"
"math"
"net/url"
"regexp"
@@ -50,16 +51,13 @@ type CallOptions struct {
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
UpdateBucket []gax.CallOption
- DeleteNotificationConfig []gax.CallOption
- GetNotificationConfig []gax.CallOption
- CreateNotificationConfig []gax.CallOption
- ListNotificationConfigs []gax.CallOption
ComposeObject []gax.CallOption
DeleteObject []gax.CallOption
RestoreObject []gax.CallOption
CancelResumableWrite []gax.CallOption
GetObject []gax.CallOption
ReadObject []gax.CallOption
+ BidiReadObject []gax.CallOption
UpdateObject []gax.CallOption
WriteObject []gax.CallOption
BidiWriteObject []gax.CallOption
@@ -67,21 +65,19 @@ type CallOptions struct {
RewriteObject []gax.CallOption
StartResumableWrite []gax.CallOption
QueryWriteStatus []gax.CallOption
- GetServiceAccount []gax.CallOption
- CreateHmacKey []gax.CallOption
- DeleteHmacKey []gax.CallOption
- GetHmacKey []gax.CallOption
- ListHmacKeys []gax.CallOption
- UpdateHmacKey []gax.CallOption
+ MoveObject []gax.CallOption
}
func defaultGRPCClientOptions() []option.ClientOption {
return []option.ClientOption{
internaloption.WithDefaultEndpoint("storage.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("storage.UNIVERSE_DOMAIN:443"),
internaloption.WithDefaultMTLSEndpoint("storage.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://storage.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -206,46 +202,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- GetNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- CreateNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- ListNotificationConfigs: []gax.CallOption{
+ ComposeObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -258,7 +215,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ComposeObject: []gax.CallOption{
+ DeleteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -271,7 +228,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteObject: []gax.CallOption{
+ RestoreObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -284,7 +241,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RestoreObject: []gax.CallOption{
+ CancelResumableWrite: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -297,7 +254,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CancelResumableWrite: []gax.CallOption{
+ GetObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -310,8 +267,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ ReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -323,7 +279,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ReadObject: []gax.CallOption{
+ BidiReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -424,72 +380,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetServiceAccount: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- CreateHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- DeleteHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- GetHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- ListHmacKeys: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- UpdateHmacKey: []gax.CallOption{
+ MoveObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -519,16 +410,13 @@ type internalClient interface {
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error)
- DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
- GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error)
GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
+ BidiReadObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error)
UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error)
BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error)
@@ -536,12 +424,7 @@ type internalClient interface {
RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error)
StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error)
QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error)
- GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error)
- CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error)
- DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error
- GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
- ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator
- UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
+ MoveObject(context.Context, *storagepb.MoveObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
}
// Client is a client for interacting with Cloud Storage API.
@@ -625,27 +508,27 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L
return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...)
}
-// GetIamPolicy gets the IAM policy for a specified bucket or object.
+// GetIamPolicy gets the IAM policy for a specified bucket.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket}.
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
return c.internalClient.GetIamPolicy(ctx, req, opts...)
}
-// SetIamPolicy updates an IAM policy for the specified bucket or object.
+// SetIamPolicy updates an IAM policy for the specified bucket.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket}.
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
return c.internalClient.SetIamPolicy(ctx, req, opts...)
}
-// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
-// any, are held by the caller.
+// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder
+// to see which, if any, are held by the caller.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket} for a bucket,
+// projects/_/buckets/{bucket}/objects/{object} for an object, or
+// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
+// for a managed folder.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
@@ -655,41 +538,32 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe
return c.internalClient.UpdateBucket(ctx, req, opts...)
}
-// DeleteNotificationConfig permanently deletes a NotificationConfig.
-func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
-}
-
-// GetNotificationConfig view a NotificationConfig.
-func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.GetNotificationConfig(ctx, req, opts...)
-}
-
-// CreateNotificationConfig creates a NotificationConfig for a given bucket.
-// These NotificationConfigs, when triggered, publish messages to the
-// specified Pub/Sub topics. See
-// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
-func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
-}
-
-// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
-func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
-}
-
// ComposeObject concatenates a list of existing objects into a new object in the same
// bucket.
func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.ComposeObject(ctx, req, opts...)
}
-// DeleteObject deletes an object and its metadata.
+// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
+// is not enabled for the bucket, or if the generation parameter is used, or
+// if soft delete (at https://cloud.google.com/storage/docs/soft-delete) is not
+// enabled for the bucket.
+// When this API is used to delete an object from a bucket that has soft
+// delete policy enabled, the object becomes soft deleted, and the
+// softDeleteTime and hardDeleteTime properties are set on the object.
+// This API cannot be used to permanently delete soft-deleted objects.
+// Soft-deleted objects are permanently deleted according to their
+// hardDeleteTime.
+//
+// You can use the [RestoreObject][google.storage.v2.Storage.RestoreObject]
+// API to restore soft-deleted objects until the soft delete retention period
+// has passed.
+//
+// IAM Permissions:
//
-// Deletions are normally permanent when versioning is disabled or whenever
-// the generation parameter is used. However, if soft delete is enabled for
-// the bucket, deleted objects can be restored using RestoreObject until the
-// soft delete retention period has passed.
+// Requires storage.objects.delete
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteObject(ctx, req, opts...)
}
@@ -711,16 +585,52 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel
return c.internalClient.CancelResumableWrite(ctx, req, opts...)
}
-// GetObject retrieves an object’s metadata.
+// GetObject retrieves object metadata.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.get
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket. To return object ACLs, the authenticated user must also have
+// the storage.objects.getIamPolicy permission.
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.GetObject(ctx, req, opts...)
}
-// ReadObject reads an object’s data.
+// ReadObject retrieves object data.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.get
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
return c.internalClient.ReadObject(ctx, req, opts...)
}
+// BidiReadObject reads an object’s data.
+//
+// This is a bi-directional API with the added support for reading multiple
+// ranges within one stream both within and across multiple messages.
+// If the server encountered an error for any of the inputs, the stream will
+// be closed with the relevant error code.
+// Because the API allows for multiple outstanding requests, when the stream
+// is closed the error response will contain a BidiReadObjectRangesError proto
+// in the error extension describing the error for each outstanding read_id.
+//
+// IAM Permissions:
+//
+// # Requires storage.objects.get
+//
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
+//
+// This API is currently in preview and is not yet available for general
+// use.
+func (c *Client) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) {
+ return c.internalClient.BidiReadObject(ctx, opts...)
+}
+
// UpdateObject updates an object’s metadata.
// Equivalent to JSON API’s storage.objects.patch.
func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
@@ -790,6 +700,12 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.create
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
return c.internalClient.WriteObject(ctx, opts...)
}
@@ -814,6 +730,13 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s
}
// ListObjects retrieves a list of objects matching the criteria.
+//
+// IAM Permissions:
+//
+// The authenticated user requires storage.objects.list
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions)
+// to use this method. To return object ACLs, the authenticated user must also
+// have the storage.objects.getIamPolicy permission.
func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
return c.internalClient.ListObjects(ctx, req, opts...)
}
@@ -824,58 +747,47 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject
return c.internalClient.RewriteObject(ctx, req, opts...)
}
-// StartResumableWrite starts a resumable write. How long the write operation remains valid, and
-// what happens when the write operation becomes invalid, are
-// service-dependent.
+// StartResumableWrite starts a resumable write operation. This
+// method is part of the Resumable
+// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
+// This allows you to upload large objects in multiple chunks, which is more
+// resilient to network interruptions than a single upload. The validity
+// duration of the write operation, and the consequences of it becoming
+// invalid, are service-dependent.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.create
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
return c.internalClient.StartResumableWrite(ctx, req, opts...)
}
-// QueryWriteStatus determines the persisted_size for an object that is being written, which
-// can then be used as the write_offset for the next Write() call.
+// QueryWriteStatus determines the persisted_size of an object that is being written. This
+// method is part of the resumable
+// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
+// The returned value is the size of the object that has been persisted so
+// far. The value can be used as the write_offset for the next Write()
+// call.
//
-// If the object does not exist (i.e., the object has been deleted, or the
-// first Write() has not yet reached the service), this method returns the
+// If the object does not exist, meaning if it was deleted, or the
+// first Write() has not yet reached the service, this method returns the
// error NOT_FOUND.
//
-// The client may call QueryWriteStatus() at any time to determine how
-// much data has been processed for this object. This is useful if the
-// client is buffering data and needs to know which data can be safely
-// evicted. For any sequence of QueryWriteStatus() calls for a given
-// object name, the sequence of returned persisted_size values will be
+// This method is useful for clients that buffer data and need to know which
+// data can be safely evicted. The client can call QueryWriteStatus() at any
+// time to determine how much data has been logged for this object.
+// For any sequence of QueryWriteStatus() calls for a given
+// object name, the sequence of returned persisted_size values are
// non-decreasing.
func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
return c.internalClient.QueryWriteStatus(ctx, req, opts...)
}
-// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account.
-func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
- return c.internalClient.GetServiceAccount(ctx, req, opts...)
-}
-
-// CreateHmacKey creates a new HMAC key for the given service account.
-func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
- return c.internalClient.CreateHmacKey(ctx, req, opts...)
-}
-
-// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state.
-func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteHmacKey(ctx, req, opts...)
-}
-
-// GetHmacKey gets an existing HMAC key metadata for the given id.
-func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- return c.internalClient.GetHmacKey(ctx, req, opts...)
-}
-
-// ListHmacKeys lists HMAC keys under a given project with the additional filters provided.
-func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
- return c.internalClient.ListHmacKeys(ctx, req, opts...)
-}
-
-// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE.
-func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- return c.internalClient.UpdateHmacKey(ctx, req, opts...)
+// MoveObject moves the source object to the destination object in the same bucket.
+func (c *Client) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
+ return c.internalClient.MoveObject(ctx, req, opts...)
}
// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport.
@@ -893,6 +805,8 @@ type gRPCClient struct {
// The x-goog-* metadata to be sent with each request.
xGoogHeaders []string
+
+ logger *slog.Logger
}
// NewClient creates a new storage client based on gRPC.
@@ -940,6 +854,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
connPool: connPool,
client: storagepb.NewStorageClient(connPool),
CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
@@ -962,7 +877,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn {
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -988,7 +905,7 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...)
+ _, err = executeRPC(ctx, c.client.DeleteBucket, req, settings.GRPC, c.logger, "DeleteBucket")
return err
}, opts...)
return err
@@ -1012,7 +929,7 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetBucket(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetBucket, req, settings.GRPC, c.logger, "GetBucket")
return err
}, opts...)
if err != nil {
@@ -1042,7 +959,7 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.CreateBucket, req, settings.GRPC, c.logger, "CreateBucket")
return err
}, opts...)
if err != nil {
@@ -1080,7 +997,7 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ListBuckets, req, settings.GRPC, c.logger, "ListBuckets")
return err
}, opts...)
if err != nil {
@@ -1124,7 +1041,7 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.LockBucketRetentionPolicy, req, settings.GRPC, c.logger, "LockBucketRetentionPolicy")
return err
}, opts...)
if err != nil {
@@ -1139,9 +1056,6 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
- }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1154,7 +1068,7 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1169,9 +1083,6 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
- }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1184,7 +1095,7 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1202,6 +1113,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
+ if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1214,7 +1128,7 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
var resp *iampb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
return err
}, opts...)
if err != nil {
@@ -1241,7 +1155,7 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.UpdateBucket, req, settings.GRPC, c.logger, "UpdateBucket")
return err
}, opts...)
if err != nil {
@@ -1250,138 +1164,6 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
return resp, nil
}
-func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
- it := &NotificationConfigIterator{}
- req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
- resp := &storagepb.ListNotificationConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
@@ -1400,7 +1182,7 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ComposeObject, req, settings.GRPC, c.logger, "ComposeObject")
return err
}, opts...)
if err != nil {
@@ -1426,7 +1208,7 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- _, err = c.client.DeleteObject(ctx, req, settings.GRPC...)
+ _, err = executeRPC(ctx, c.client.DeleteObject, req, settings.GRPC, c.logger, "DeleteObject")
return err
}, opts...)
return err
@@ -1450,7 +1232,7 @@ func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreOb
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.RestoreObject, req, settings.GRPC, c.logger, "RestoreObject")
return err
}, opts...)
if err != nil {
@@ -1477,7 +1259,7 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca
var resp *storagepb.CancelResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.CancelResumableWrite, req, settings.GRPC, c.logger, "CancelResumableWrite")
return err
}, opts...)
if err != nil {
@@ -1504,7 +1286,7 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetObject, req, settings.GRPC, c.logger, "GetObject")
return err
}, opts...)
if err != nil {
@@ -1531,7 +1313,26 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
var resp storagepb.Storage_ReadObjectClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "ReadObject")
resp, err = c.client.ReadObject(ctx, req, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "ReadObject")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) {
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
+ var resp storagepb.Storage_BidiReadObjectClient
+ opts = append((*c.CallOptions).BidiReadObject[0:len((*c.CallOptions).BidiReadObject):len((*c.CallOptions).BidiReadObject)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiReadObject")
+ resp, err = c.client.BidiReadObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiReadObject")
return err
}, opts...)
if err != nil {
@@ -1558,7 +1359,7 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.UpdateObject, req, settings.GRPC, c.logger, "UpdateObject")
return err
}, opts...)
if err != nil {
@@ -1573,7 +1374,9 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s
opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "WriteObject")
resp, err = c.client.WriteObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "WriteObject")
return err
}, opts...)
if err != nil {
@@ -1588,7 +1391,9 @@ func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption
opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiWriteObject")
resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiWriteObject")
return err
}, opts...)
if err != nil {
@@ -1626,7 +1431,7 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ListObjects(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ListObjects, req, settings.GRPC, c.logger, "ListObjects")
return err
}, opts...)
if err != nil {
@@ -1673,7 +1478,7 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
var resp *storagepb.RewriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.RewriteObject, req, settings.GRPC, c.logger, "RewriteObject")
return err
}, opts...)
if err != nil {
@@ -1700,7 +1505,7 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
var resp *storagepb.StartResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.StartResumableWrite, req, settings.GRPC, c.logger, "StartResumableWrite")
return err
}, opts...)
if err != nil {
@@ -1727,7 +1532,7 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
var resp *storagepb.QueryWriteStatusResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.QueryWriteStatus, req, settings.GRPC, c.logger, "QueryWriteStatus")
return err
}, opts...)
if err != nil {
@@ -1736,170 +1541,11 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
return resp, nil
}
-func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
+func (c *gRPCClient) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
- var resp *storagepb.ServiceAccount
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
- var resp *storagepb.CreateHmacKeyResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...)
- var resp *storagepb.HmacKeyMetadata
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...)
- it := &HmacKeyMetadataIterator{}
- req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) {
- resp := &storagepb.ListHmacKeysResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetHmacKeys(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
+ if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
@@ -1909,11 +1555,11 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
- var resp *storagepb.HmacKeyMetadata
+ opts = append((*c.CallOptions).MoveObject[0:len((*c.CallOptions).MoveObject):len((*c.CallOptions).MoveObject)], opts...)
+ var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.MoveObject, req, settings.GRPC, c.logger, "MoveObject")
return err
}, opts...)
if err != nil {
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
index 3486fd153..6f0ac1ef8 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.23.2
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
// source: google/storage/v2/storage.proto
package storagepb
@@ -27,10 +27,11 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
_ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
date "google.golang.org/genproto/googleapis/type/date"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
+ status1 "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
@@ -197,11 +198,9 @@ type DeleteBucketRequest struct {
func (x *DeleteBucketRequest) Reset() {
*x = DeleteBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteBucketRequest) String() string {
@@ -212,7 +211,7 @@ func (*DeleteBucketRequest) ProtoMessage() {}
func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -270,11 +269,9 @@ type GetBucketRequest struct {
func (x *GetBucketRequest) Reset() {
*x = GetBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetBucketRequest) String() string {
@@ -285,7 +282,7 @@ func (*GetBucketRequest) ProtoMessage() {}
func (x *GetBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -360,11 +357,9 @@ type CreateBucketRequest struct {
func (x *CreateBucketRequest) Reset() {
*x = CreateBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateBucketRequest) String() string {
@@ -375,7 +370,7 @@ func (*CreateBucketRequest) ProtoMessage() {}
func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -452,11 +447,9 @@ type ListBucketsRequest struct {
func (x *ListBucketsRequest) Reset() {
*x = ListBucketsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBucketsRequest) String() string {
@@ -467,7 +460,7 @@ func (*ListBucketsRequest) ProtoMessage() {}
func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -532,11 +525,9 @@ type ListBucketsResponse struct {
func (x *ListBucketsResponse) Reset() {
*x = ListBucketsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBucketsResponse) String() string {
@@ -547,7 +538,7 @@ func (*ListBucketsResponse) ProtoMessage() {}
func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -591,11 +582,9 @@ type LockBucketRetentionPolicyRequest struct {
func (x *LockBucketRetentionPolicyRequest) Reset() {
*x = LockBucketRetentionPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LockBucketRetentionPolicyRequest) String() string {
@@ -606,7 +595,7 @@ func (*LockBucketRetentionPolicyRequest) ProtoMessage() {}
func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -671,11 +660,9 @@ type UpdateBucketRequest struct {
func (x *UpdateBucketRequest) Reset() {
*x = UpdateBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateBucketRequest) String() string {
@@ -686,7 +673,7 @@ func (*UpdateBucketRequest) ProtoMessage() {}
func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -743,296 +730,6 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
-// Request message for DeleteNotificationConfig.
-type DeleteNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The parent bucket of the NotificationConfig.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteNotificationConfigRequest) Reset() {
- *x = DeleteNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteNotificationConfigRequest) ProtoMessage() {}
-
-func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *DeleteNotificationConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Request message for GetNotificationConfig.
-type GetNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The parent bucket of the NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetNotificationConfigRequest) Reset() {
- *x = GetNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNotificationConfigRequest) ProtoMessage() {}
-
-func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *GetNotificationConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Request message for CreateNotificationConfig.
-type CreateNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The bucket to which this NotificationConfig belongs.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. Properties of the NotificationConfig to be inserted.
- NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
-}
-
-func (x *CreateNotificationConfigRequest) Reset() {
- *x = CreateNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateNotificationConfigRequest) ProtoMessage() {}
-
-func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *CreateNotificationConfigRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
- if x != nil {
- return x.NotificationConfig
- }
- return nil
-}
-
-// Request message for ListNotifications.
-type ListNotificationConfigsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of a Google Cloud Storage bucket.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // The maximum number of NotificationConfigs to return. The service may
- // return fewer than this value. The default value is 100. Specifying a value
- // above 100 will result in a page_size of 100.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A page token, received from a previous `ListNotificationConfigs` call.
- // Provide this to retrieve the subsequent page.
- //
- // When paginating, all other parameters provided to `ListNotificationConfigs`
- // must match the call that provided the page token.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListNotificationConfigsRequest) Reset() {
- *x = ListNotificationConfigsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListNotificationConfigsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListNotificationConfigsRequest) ProtoMessage() {}
-
-func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ListNotificationConfigsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListNotificationConfigsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The result of a call to ListNotificationConfigs
-type ListNotificationConfigsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of items.
- NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
- // A token, which can be sent as `page_token` to retrieve the next page.
- // If this field is omitted, there are no subsequent pages.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListNotificationConfigsResponse) Reset() {
- *x = ListNotificationConfigsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListNotificationConfigsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListNotificationConfigsResponse) ProtoMessage() {}
-
-func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
- if x != nil {
- return x.NotificationConfigs
- }
- return nil
-}
-
-func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
// Request message for ComposeObject.
type ComposeObjectRequest struct {
state protoimpl.MessageState
@@ -1068,11 +765,9 @@ type ComposeObjectRequest struct {
func (x *ComposeObjectRequest) Reset() {
*x = ComposeObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest) String() string {
@@ -1082,8 +777,8 @@ func (x *ComposeObjectRequest) String() string {
func (*ComposeObjectRequest) ProtoMessage() {}
func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[7]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1095,7 +790,7 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
}
func (x *ComposeObjectRequest) GetDestination() *Object {
@@ -1191,11 +886,9 @@ type DeleteObjectRequest struct {
func (x *DeleteObjectRequest) Reset() {
*x = DeleteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteObjectRequest) String() string {
@@ -1205,8 +898,8 @@ func (x *DeleteObjectRequest) String() string {
func (*DeleteObjectRequest) ProtoMessage() {}
func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[8]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1218,7 +911,7 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
}
func (x *DeleteObjectRequest) GetBucket() string {
@@ -1290,6 +983,12 @@ type RestoreObjectRequest struct {
Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
// Required. The specific revision of the object to restore.
Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Optional. Restore token used to differentiate soft-deleted objects with the
+ // same name and generation. Only applicable for hierarchical namespace
+ // buckets. This parameter is optional, and is only required in the rare case
+ // when there are multiple soft-deleted objects with the same name and
+ // generation.
+ RestoreToken string `protobuf:"bytes,11,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
@@ -1315,11 +1014,9 @@ type RestoreObjectRequest struct {
func (x *RestoreObjectRequest) Reset() {
*x = RestoreObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreObjectRequest) String() string {
@@ -1329,8 +1026,8 @@ func (x *RestoreObjectRequest) String() string {
func (*RestoreObjectRequest) ProtoMessage() {}
func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[9]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1342,7 +1039,7 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
}
func (x *RestoreObjectRequest) GetBucket() string {
@@ -1366,6 +1063,13 @@ func (x *RestoreObjectRequest) GetGeneration() int64 {
return 0
}
+func (x *RestoreObjectRequest) GetRestoreToken() string {
+ if x != nil {
+ return x.RestoreToken
+ }
+ return ""
+}
+
func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
@@ -1422,11 +1126,9 @@ type CancelResumableWriteRequest struct {
func (x *CancelResumableWriteRequest) Reset() {
*x = CancelResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CancelResumableWriteRequest) String() string {
@@ -1436,8 +1138,8 @@ func (x *CancelResumableWriteRequest) String() string {
func (*CancelResumableWriteRequest) ProtoMessage() {}
func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[10]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1449,7 +1151,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
}
func (x *CancelResumableWriteRequest) GetUploadId() string {
@@ -1469,11 +1171,9 @@ type CancelResumableWriteResponse struct {
func (x *CancelResumableWriteResponse) Reset() {
*x = CancelResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CancelResumableWriteResponse) String() string {
@@ -1483,8 +1183,8 @@ func (x *CancelResumableWriteResponse) String() string {
func (*CancelResumableWriteResponse) ProtoMessage() {}
func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[11]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1496,7 +1196,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
}
// Request message for ReadObject.
@@ -1557,11 +1257,9 @@ type ReadObjectRequest struct {
func (x *ReadObjectRequest) Reset() {
*x = ReadObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadObjectRequest) String() string {
@@ -1571,8 +1269,8 @@ func (x *ReadObjectRequest) String() string {
func (*ReadObjectRequest) ProtoMessage() {}
func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[12]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1584,7 +1282,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
}
func (x *ReadObjectRequest) GetBucket() string {
@@ -1701,15 +1399,19 @@ type GetObjectRequest struct {
// metadata.owner.
// * may be used to mean "all fields".
ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Restore token used to differentiate soft-deleted objects with the
+ // same name and generation. Only applicable for hierarchical namespace
+ // buckets and if soft_deleted is set to true. This parameter is optional, and
+ // is only required in the rare case when there are multiple soft-deleted
+ // objects with the same name and generation.
+ RestoreToken string `protobuf:"bytes,12,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"`
}
func (x *GetObjectRequest) Reset() {
*x = GetObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetObjectRequest) String() string {
@@ -1719,8 +1421,8 @@ func (x *GetObjectRequest) String() string {
func (*GetObjectRequest) ProtoMessage() {}
func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[13]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1732,7 +1434,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
func (*GetObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
}
func (x *GetObjectRequest) GetBucket() string {
@@ -1805,6 +1507,13 @@ func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
return nil
}
+func (x *GetObjectRequest) GetRestoreToken() string {
+ if x != nil {
+ return x.RestoreToken
+ }
+ return ""
+}
+
// Response message for ReadObject.
type ReadObjectResponse struct {
state protoimpl.MessageState
@@ -1831,11 +1540,9 @@ type ReadObjectResponse struct {
func (x *ReadObjectResponse) Reset() {
*x = ReadObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadObjectResponse) String() string {
@@ -1845,8 +1552,8 @@ func (x *ReadObjectResponse) String() string {
func (*ReadObjectResponse) ProtoMessage() {}
func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[14]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1858,7 +1565,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
}
func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
@@ -1889,61 +1596,72 @@ func (x *ReadObjectResponse) GetMetadata() *Object {
return nil
}
-// Describes an attempt to insert an object, possibly over multiple requests.
-type WriteObjectSpec struct {
+// Describes the object to read in a BidiReadObject request.
+type BidiReadObjectSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Destination object, including its name and its metadata.
- Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Makes the operation conditional on whether the object's current
- // generation matches the given value. Setting to 0 makes the operation
- // succeed only if there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live
- // generation does not match the given value. If no live object exists, the
- // precondition fails. Setting to 0 makes the operation succeed only if
- // there is a live version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Required. The name of the bucket containing the object to read.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to read.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, selects a specific revision of this object (as opposed
+ // to the latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // The expected final object size being uploaded.
- // If this value is set, closing the stream after writing fewer or more than
- // `object_size` bytes will result in an OUT_OF_RANGE error.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // The checksummed_data field and its children will always be present.
+ // If no mask is specified, will default to all fields except metadata.owner
+ // and metadata.acl.
+ // * may be used to mean "all fields".
+ // As per https://google.aip.dev/161, this field is deprecated.
+ // As an alternative, grpc metadata can be used:
+ // https://cloud.google.com/apis/docs/system-parameters#definitions
//
- // This situation is considered a client error, and if such an error occurs
- // you must start the upload over from scratch, this time sending the correct
- // number of bytes.
- ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
+ // Deprecated: Marked as deprecated in google/storage/v2/storage.proto.
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // The client can optionally set this field. The read handle is an optimized
+ // way of creating new streams. Read handles are generated and periodically
+ // refreshed from prior reads.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,13,opt,name=read_handle,json=readHandle,proto3,oneof" json:"read_handle,omitempty"`
+ // The routing token that influences request routing for the stream. Must be
+ // provided if a BidiReadObjectRedirectedError is returned.
+ RoutingToken *string `protobuf:"bytes,14,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
}
-func (x *WriteObjectSpec) Reset() {
- *x = WriteObjectSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadObjectSpec) Reset() {
+ *x = BidiReadObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectSpec) String() string {
+func (x *BidiReadObjectSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectSpec) ProtoMessage() {}
+func (*BidiReadObjectSpec) ProtoMessage() {}
-func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1953,125 +1671,123 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
-func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
+// Deprecated: Use BidiReadObjectSpec.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
}
-func (x *WriteObjectSpec) GetResource() *Object {
+func (x *BidiReadObjectSpec) GetBucket() string {
if x != nil {
- return x.Resource
+ return x.Bucket
}
- return nil
+ return ""
}
-func (x *WriteObjectSpec) GetPredefinedAcl() string {
+func (x *BidiReadObjectSpec) GetObject() string {
if x != nil {
- return x.PredefinedAcl
+ return x.Object
}
return ""
}
-func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+func (x *BidiReadObjectSpec) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *BidiReadObjectSpec) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfGenerationNotMatch() int64 {
if x != nil && x.IfGenerationNotMatch != nil {
return *x.IfGenerationNotMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfMetagenerationMatch() int64 {
if x != nil && x.IfMetagenerationMatch != nil {
return *x.IfMetagenerationMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfMetagenerationNotMatch() int64 {
if x != nil && x.IfMetagenerationNotMatch != nil {
return *x.IfMetagenerationNotMatch
}
return 0
}
-func (x *WriteObjectSpec) GetObjectSize() int64 {
- if x != nil && x.ObjectSize != nil {
- return *x.ObjectSize
+func (x *BidiReadObjectSpec) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
- return 0
+ return nil
}
-// Request message for WriteObject.
-type WriteObjectRequest struct {
+// Deprecated: Marked as deprecated in google/storage/v2/storage.proto.
+func (x *BidiReadObjectSpec) GetReadMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.ReadMask
+ }
+ return nil
+}
+
+func (x *BidiReadObjectSpec) GetReadHandle() *BidiReadHandle {
+ if x != nil {
+ return x.ReadHandle
+ }
+ return nil
+}
+
+func (x *BidiReadObjectSpec) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
+
+// Request message for BidiReadObject.
+type BidiReadObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- // *WriteObjectRequest_UploadId
- // *WriteObjectRequest_WriteObjectSpec
- FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An incorrect value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- // *WriteObjectRequest_ChecksummedData
- Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The first message of each stream should set this field. If this is not
+ // the first message, an error will be returned. Describes the object to read.
+ ReadObjectSpec *BidiReadObjectSpec `protobuf:"bytes,1,opt,name=read_object_spec,json=readObjectSpec,proto3" json:"read_object_spec,omitempty"`
+ // Provides a list of 0 or more (up to 100) ranges to read. If a single range
+ // is large enough to require multiple responses, they are guaranteed to be
+ // delivered in increasing offset order. There are no ordering guarantees
+ // across ranges. When no ranges are provided, the response message will not
+ // include ObjectRangeData. For full object downloads, the offset and size can
+ // be set to 0.
+ ReadRanges []*ReadRange `protobuf:"bytes,8,rep,name=read_ranges,json=readRanges,proto3" json:"read_ranges,omitempty"`
}
-func (x *WriteObjectRequest) Reset() {
- *x = WriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadObjectRequest) Reset() {
+ *x = BidiReadObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectRequest) String() string {
+func (x *BidiReadObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest) ProtoMessage() {}
+func (*BidiReadObjectRequest) ProtoMessage() {}
-func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2081,138 +1797,192 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
+// Deprecated: Use BidiReadObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
}
-func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *BidiReadObjectRequest) GetReadObjectSpec() *BidiReadObjectSpec {
+ if x != nil {
+ return x.ReadObjectSpec
}
return nil
}
-func (x *WriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
- return x.UploadId
+func (x *BidiReadObjectRequest) GetReadRanges() []*ReadRange {
+ if x != nil {
+ return x.ReadRanges
}
- return ""
+ return nil
}
-func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
- }
- return nil
+// Response message for BidiReadObject.
+type BidiReadObjectResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A portion of the object's data. The service **may** leave data
+ // empty for any given ReadResponse. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ // The service **may** pipeline multiple responses belonging to different read
+ // requests. Each ObjectRangeData entry will have a read_id
+ // set to the same value as the corresponding source read request.
+ ObjectDataRanges []*ObjectRangeData `protobuf:"bytes,6,rep,name=object_data_ranges,json=objectDataRanges,proto3" json:"object_data_ranges,omitempty"`
+ // Metadata of the object whose media is being returned.
+ // Only populated in the first response in the stream and not populated when
+ // the stream is opened with a read handle.
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // This field will be periodically refreshed, however it may not be set in
+ // every response. It allows the client to more efficiently open subsequent
+ // bidirectional streams to the same object.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,7,opt,name=read_handle,json=readHandle,proto3" json:"read_handle,omitempty"`
}
-func (x *WriteObjectRequest) GetWriteOffset() int64 {
+func (x *BidiReadObjectResponse) Reset() {
+ *x = BidiReadObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiReadObjectResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiReadObjectResponse) ProtoMessage() {}
+
+func (x *BidiReadObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
if x != nil {
- return x.WriteOffset
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
- if m != nil {
- return m.Data
- }
- return nil
+// Deprecated: Use BidiReadObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
}
-func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
+func (x *BidiReadObjectResponse) GetObjectDataRanges() []*ObjectRangeData {
+ if x != nil {
+ return x.ObjectDataRanges
}
return nil
}
-func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *BidiReadObjectResponse) GetMetadata() *Object {
if x != nil {
- return x.ObjectChecksums
+ return x.Metadata
}
return nil
}
-func (x *WriteObjectRequest) GetFinishWrite() bool {
- if x != nil {
- return x.FinishWrite
- }
- return false
-}
-
-func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *BidiReadObjectResponse) GetReadHandle() *BidiReadHandle {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.ReadHandle
}
return nil
}
-type isWriteObjectRequest_FirstMessage interface {
- isWriteObjectRequest_FirstMessage()
+// Error proto containing details for a redirected read. This error is only
+// returned on initial open in case of a redirect.
+type BidiReadObjectRedirectedError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The read handle for the redirected read. The client can use this for the
+ // subsequent open.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,1,opt,name=read_handle,json=readHandle,proto3" json:"read_handle,omitempty"`
+ // The routing token that should be used when reopening the read stream.
+ RoutingToken *string `protobuf:"bytes,2,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
}
-type WriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) Reset() {
+ *x = BidiReadObjectRedirectedError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type WriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+func (*BidiReadObjectRedirectedError) ProtoMessage() {}
-func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+func (x *BidiReadObjectRedirectedError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-type isWriteObjectRequest_Data interface {
- isWriteObjectRequest_Data()
+// Deprecated: Use BidiReadObjectRedirectedError.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectRedirectedError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
}
-type WriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) GetReadHandle() *BidiReadHandle {
+ if x != nil {
+ return x.ReadHandle
+ }
+ return nil
}
-func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+func (x *BidiReadObjectRedirectedError) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
-// Response message for WriteObject.
-type WriteObjectResponse struct {
+// Error proto containing details for a redirected write. This error is only
+// returned on initial open in case of a redirect.
+type BidiWriteObjectRedirectedError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- // *WriteObjectResponse_PersistedSize
- // *WriteObjectResponse_Resource
- WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // The routing token that should be used when reopening the write stream.
+ RoutingToken *string `protobuf:"bytes,1,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
+ // Opaque value describing a previous write.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,2,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
+ // The generation of the object that triggered the redirect.
+ // Note that if this error was returned as part of an appendable object
+ // create, this object generation is now successfully created and
+ // append_object_spec should be used when reconnecting.
+ Generation *int64 `protobuf:"varint,3,opt,name=generation,proto3,oneof" json:"generation,omitempty"`
}
-func (x *WriteObjectResponse) Reset() {
- *x = WriteObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiWriteObjectRedirectedError) Reset() {
+ *x = BidiWriteObjectRedirectedError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectResponse) String() string {
+func (x *BidiWriteObjectRedirectedError) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectResponse) ProtoMessage() {}
+func (*BidiWriteObjectRedirectedError) ProtoMessage() {}
-func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiWriteObjectRedirectedError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2222,130 +1992,108 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
+// Deprecated: Use BidiWriteObjectRedirectedError.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRedirectedError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
}
-func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
+func (x *BidiWriteObjectRedirectedError) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
+
+func (x *BidiWriteObjectRedirectedError) GetWriteHandle() *BidiWriteHandle {
+ if x != nil {
+ return x.WriteHandle
}
return nil
}
-func (x *WriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
+func (x *BidiWriteObjectRedirectedError) GetGeneration() int64 {
+ if x != nil && x.Generation != nil {
+ return *x.Generation
}
return 0
}
-func (x *WriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
- return x.Resource
- }
- return nil
+// Error extension proto containing details for all outstanding reads on the
+// failed stream
+type BidiReadObjectError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The error code for each outstanding read_range
+ ReadRangeErrors []*ReadRangeError `protobuf:"bytes,1,rep,name=read_range_errors,json=readRangeErrors,proto3" json:"read_range_errors,omitempty"`
}
-type isWriteObjectResponse_WriteStatus interface {
- isWriteObjectResponse_WriteStatus()
+func (x *BidiReadObjectError) Reset() {
+ *x = BidiReadObjectError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type WriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+func (x *BidiReadObjectError) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type WriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+func (*BidiReadObjectError) ProtoMessage() {}
+
+func (x *BidiReadObjectError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
+// Deprecated: Use BidiReadObjectError.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
+}
-func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
+func (x *BidiReadObjectError) GetReadRangeErrors() []*ReadRangeError {
+ if x != nil {
+ return x.ReadRangeErrors
+ }
+ return nil
+}
-// Request message for BidiWriteObject.
-type BidiWriteObjectRequest struct {
+// Error extension proto containing details for a single range read
+type ReadRangeError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- // *BidiWriteObjectRequest_UploadId
- // *BidiWriteObjectRequest_WriteObjectSpec
- FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An invalid value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- // *BidiWriteObjectRequest_ChecksummedData
- Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // For each BidiWriteObjectRequest where state_lookup is `true` or the client
- // closes the stream, the service will send a BidiWriteObjectResponse
- // containing the current persisted size. The persisted size sent in responses
- // covers all the bytes the server has persisted thus far and can be used to
- // decide what data is safe for the client to drop. Note that the object's
- // current size reported by the BidiWriteObjectResponse may lag behind the
- // number of bytes written by the client.
- StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
- // Persists data written on the stream, up to and including the current
- // message, to permanent storage. This option should be used sparingly as it
- // may reduce performance. Ongoing writes will periodically be persisted on
- // the server even when `flush` is not set.
- Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The id of the corresponding read_range
+ ReadId int64 `protobuf:"varint,1,opt,name=read_id,json=readId,proto3" json:"read_id,omitempty"`
+ // The status which should be an enum value of [google.rpc.Code].
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
}
-func (x *BidiWriteObjectRequest) Reset() {
- *x = BidiWriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *ReadRangeError) Reset() {
+ *x = ReadRangeError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *BidiWriteObjectRequest) String() string {
+func (x *ReadRangeError) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectRequest) ProtoMessage() {}
+func (*ReadRangeError) ProtoMessage() {}
-func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *ReadRangeError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2355,152 +2103,205 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
+// Deprecated: Use ReadRangeError.ProtoReflect.Descriptor instead.
+func (*ReadRangeError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
}
-func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *ReadRangeError) GetReadId() int64 {
+ if x != nil {
+ return x.ReadId
}
- return nil
+ return 0
}
-func (x *BidiWriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
- return x.UploadId
+func (x *ReadRangeError) GetStatus() *status.Status {
+ if x != nil {
+ return x.Status
}
- return ""
+ return nil
}
-func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
- }
- return nil
+// Describes a range of bytes to read in a BidiReadObjectRanges request.
+type ReadRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The offset for the first byte to return in the read, relative to
+ // the start of the object.
+ //
+ // A negative read_offset value will be interpreted as the number of bytes
+ // back from the end of the object to be returned. For example, if an object's
+ // length is 15 bytes, a ReadObjectRequest with read_offset = -5 and
+ // read_length = 3 would return bytes 10 through 12 of the object. Requesting
+ // a negative offset with magnitude larger than the size of the object will
+ // return the entire object. A read_offset larger than the size of the object
+ // will result in an OutOfRange error.
+ ReadOffset int64 `protobuf:"varint,1,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
+ // Optional. The maximum number of data bytes the server is allowed to return
+ // across all response messages with the same read_id. A read_length of zero
+ // indicates to read until the resource end, and a negative read_length will
+ // cause an error. If the stream returns fewer bytes than allowed by the
+ // read_length and no error occurred, the stream includes all data from the
+ // read_offset to the resource end.
+ ReadLength int64 `protobuf:"varint,2,opt,name=read_length,json=readLength,proto3" json:"read_length,omitempty"`
+ // Required. Read identifier provided by the client. When the client issues
+ // more than one outstanding ReadRange on the same stream, responses can be
+ // mapped back to their corresponding requests using this value. Clients must
+ // ensure that all outstanding requests have different read_id values. The
+ // server may close the stream with an error if this condition is not met.
+ ReadId int64 `protobuf:"varint,3,opt,name=read_id,json=readId,proto3" json:"read_id,omitempty"`
+}
+
+func (x *ReadRange) Reset() {
+ *x = ReadRange{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
- if x != nil {
- return x.WriteOffset
- }
- return 0
+func (x *ReadRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
- if m != nil {
- return m.Data
+func (*ReadRange) ProtoMessage() {}
+
+func (x *ReadRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
- }
- return nil
+// Deprecated: Use ReadRange.ProtoReflect.Descriptor instead.
+func (*ReadRange) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
}
-func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ReadRange) GetReadOffset() int64 {
if x != nil {
- return x.ObjectChecksums
+ return x.ReadOffset
}
- return nil
+ return 0
}
-func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+func (x *ReadRange) GetReadLength() int64 {
if x != nil {
- return x.StateLookup
+ return x.ReadLength
}
- return false
+ return 0
}
-func (x *BidiWriteObjectRequest) GetFlush() bool {
+func (x *ReadRange) GetReadId() int64 {
if x != nil {
- return x.Flush
+ return x.ReadId
}
- return false
+ return 0
}
-func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
- if x != nil {
- return x.FinishWrite
- }
- return false
-}
+// Contains data and metadata for a range of an object.
+type ObjectRangeData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
+ // A portion of the data for the object.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
+ // The ReadRange describes the content being returned with read_id set to the
+ // corresponding ReadObjectRequest in the stream. Multiple ObjectRangeData
+ // messages may have the same read_id but increasing offsets.
+ // ReadObjectResponse messages with the same read_id are guaranteed to be
+ // delivered in increasing offset order.
+ ReadRange *ReadRange `protobuf:"bytes,2,opt,name=read_range,json=readRange,proto3" json:"read_range,omitempty"`
+ // If set, indicates there are no more bytes to read for the given ReadRange.
+ RangeEnd bool `protobuf:"varint,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (x *ObjectRangeData) Reset() {
+ *x = ObjectRangeData{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type isBidiWriteObjectRequest_FirstMessage interface {
- isBidiWriteObjectRequest_FirstMessage()
+func (x *ObjectRangeData) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type BidiWriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
-}
+func (*ObjectRangeData) ProtoMessage() {}
-type BidiWriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+func (x *ObjectRangeData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
-
-func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+// Deprecated: Use ObjectRangeData.ProtoReflect.Descriptor instead.
+func (*ObjectRangeData) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
+}
-type isBidiWriteObjectRequest_Data interface {
- isBidiWriteObjectRequest_Data()
+func (x *ObjectRangeData) GetChecksummedData() *ChecksummedData {
+ if x != nil {
+ return x.ChecksummedData
+ }
+ return nil
}
-type BidiWriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+func (x *ObjectRangeData) GetReadRange() *ReadRange {
+ if x != nil {
+ return x.ReadRange
+ }
+ return nil
}
-func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
+func (x *ObjectRangeData) GetRangeEnd() bool {
+ if x != nil {
+ return x.RangeEnd
+ }
+ return false
+}
-// Response message for BidiWriteObject.
-type BidiWriteObjectResponse struct {
+// BidiReadHandle contains a handle from a previous BiDiReadObject
+// invocation. The client can use this instead of BidiReadObjectSpec as an
+// optimized way of opening subsequent bidirectional streams to the same object.
+type BidiReadHandle struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- // *BidiWriteObjectResponse_PersistedSize
- // *BidiWriteObjectResponse_Resource
- WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // Required. Opaque value describing a previous read.
+ Handle []byte `protobuf:"bytes,1,opt,name=handle,proto3" json:"handle,omitempty"`
}
-func (x *BidiWriteObjectResponse) Reset() {
- *x = BidiWriteObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadHandle) Reset() {
+ *x = BidiReadHandle{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *BidiWriteObjectResponse) String() string {
+func (x *BidiReadHandle) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectResponse) ProtoMessage() {}
+func (*BidiReadHandle) ProtoMessage() {}
-func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
+func (x *BidiReadHandle) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2510,129 +2311,123 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use BidiReadHandle.ProtoReflect.Descriptor instead.
+func (*BidiReadHandle) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
}
-func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
+func (x *BidiReadHandle) GetHandle() []byte {
+ if x != nil {
+ return x.Handle
}
return nil
}
-func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
- }
- return 0
-}
+// BidiWriteHandle contains a handle from a previous BidiWriteObject
+// invocation. The client can use this as an optimized way of opening subsequent
+// bidirectional streams to the same object.
+type BidiWriteHandle struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *BidiWriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
- return x.Resource
- }
- return nil
+ // Required. Opaque value describing a previous write.
+ Handle []byte `protobuf:"bytes,1,opt,name=handle,proto3" json:"handle,omitempty"`
}
-type isBidiWriteObjectResponse_WriteStatus interface {
- isBidiWriteObjectResponse_WriteStatus()
+func (x *BidiWriteHandle) Reset() {
+ *x = BidiWriteHandle{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type BidiWriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+func (x *BidiWriteHandle) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type BidiWriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+func (*BidiWriteHandle) ProtoMessage() {}
+
+func (x *BidiWriteHandle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
+// Deprecated: Use BidiWriteHandle.ProtoReflect.Descriptor instead.
+func (*BidiWriteHandle) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
+}
-func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
+func (x *BidiWriteHandle) GetHandle() []byte {
+ if x != nil {
+ return x.Handle
+ }
+ return nil
+}
-// Request message for ListObjects.
-type ListObjectsRequest struct {
+// Describes an attempt to insert an object, possibly over multiple requests.
+type WriteObjectSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which to look for objects.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Maximum number of `items` plus `prefixes` to return
- // in a single page of responses. As duplicate `prefixes` are
- // omitted, fewer total results may be returned than requested. The service
- // will use this parameter or 1,000 items, whichever is smaller.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously-returned page token representing part of the larger set of
- // results to view.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, returns results in a directory-like mode. `items` will contain
- // only objects whose names, aside from the `prefix`, do not
- // contain `delimiter`. Objects whose names, aside from the
- // `prefix`, contain `delimiter` will have their name,
- // truncated after the `delimiter`, returned in
- // `prefixes`. Duplicate `prefixes` are omitted.
- Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
- // If true, objects that end in exactly one instance of `delimiter`
- // will have their metadata included in `items` in addition to
- // `prefixes`.
- IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
- // Filter results to objects whose names begin with this prefix.
- Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
- // If `true`, lists all versions of an object as distinct results.
- // For more information, see
- // [Object
- // Versioning](https://cloud.google.com/storage/docs/object-versioning).
- Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
- // Mask specifying which fields to read from each result.
- // If no mask is specified, will default to all fields except items.acl and
- // items.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically equal
- // to or after lexicographic_start. If lexicographic_end is also set, the
- // objects listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically
- // before lexicographic_end. If lexicographic_start is also set, the objects
- // listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
- // Optional. If true, only list all soft-deleted versions of the object.
- // Soft delete policy is required to set this option.
- SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
- // Optional. Filter results to objects and prefixes that match this glob
- // pattern. See [List Objects Using
- // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
- // for the full syntax.
- MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+ // Required. Destination object, including its name and its metadata.
+ Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // generation matches the given value. Setting to 0 makes the operation
+ // succeed only if there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live
+ // generation does not match the given value. If no live object exists, the
+ // precondition fails. Setting to 0 makes the operation succeed only if
+ // there is a live version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // The expected final object size being uploaded.
+ // If this value is set, closing the stream after writing fewer or more than
+ // `object_size` bytes will result in an OUT_OF_RANGE error.
+ //
+ // This situation is considered a client error, and if such an error occurs
+ // you must start the upload over from scratch, this time sending the correct
+ // number of bytes.
+ ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
+ // If true, the object will be created in appendable mode.
+ // This field may only be set when using BidiWriteObject.
+ Appendable *bool `protobuf:"varint,9,opt,name=appendable,proto3,oneof" json:"appendable,omitempty"`
}
-func (x *ListObjectsRequest) Reset() {
- *x = ListObjectsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *WriteObjectSpec) Reset() {
+ *x = WriteObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListObjectsRequest) String() string {
+func (x *WriteObjectSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListObjectsRequest) ProtoMessage() {}
+func (*WriteObjectSpec) ProtoMessage() {}
-func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2642,126 +2437,132 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
-func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
+// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
+func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
}
-func (x *ListObjectsRequest) GetParent() string {
+func (x *WriteObjectSpec) GetResource() *Object {
if x != nil {
- return x.Parent
+ return x.Resource
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetPageSize() int32 {
+func (x *WriteObjectSpec) GetPredefinedAcl() string {
if x != nil {
- return x.PageSize
+ return x.PredefinedAcl
}
- return 0
+ return ""
}
-func (x *ListObjectsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
+func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return ""
+ return 0
}
-func (x *ListObjectsRequest) GetDelimiter() string {
- if x != nil {
- return x.Delimiter
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
- if x != nil {
- return x.IncludeTrailingDelimiter
- }
- return false
-}
-
-func (x *ListObjectsRequest) GetPrefix() string {
- if x != nil {
- return x.Prefix
- }
- return ""
-}
-
-func (x *ListObjectsRequest) GetVersions() bool {
- if x != nil {
- return x.Versions
+func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
}
- return false
+ return 0
}
-func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.ReadMask
+func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
}
- return nil
+ return 0
}
-func (x *ListObjectsRequest) GetLexicographicStart() string {
- if x != nil {
- return x.LexicographicStart
+func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return ""
+ return 0
}
-func (x *ListObjectsRequest) GetLexicographicEnd() string {
- if x != nil {
- return x.LexicographicEnd
+func (x *WriteObjectSpec) GetObjectSize() int64 {
+ if x != nil && x.ObjectSize != nil {
+ return *x.ObjectSize
}
- return ""
+ return 0
}
-func (x *ListObjectsRequest) GetSoftDeleted() bool {
- if x != nil {
- return x.SoftDeleted
+func (x *WriteObjectSpec) GetAppendable() bool {
+ if x != nil && x.Appendable != nil {
+ return *x.Appendable
}
return false
}
-func (x *ListObjectsRequest) GetMatchGlob() string {
- if x != nil {
- return x.MatchGlob
- }
- return ""
-}
-
-// Request object for `QueryWriteStatus`.
-type QueryWriteStatusRequest struct {
+// Request message for WriteObject.
+type WriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The name of the resume token for the object whose write status is
- // being requested.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
+ // The first message of each stream should set one of the following.
+ //
+ // Types that are assignable to FirstMessage:
+ //
+ // *WriteObjectRequest_UploadId
+ // *WriteObjectRequest_WriteObjectSpec
+ FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An incorrect value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *WriteObjectRequest_ChecksummedData
+ Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+ // don't match the specified checksums the call will fail. May only be
+ // provided in the first or last request (either with first_message, or
+ // finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *QueryWriteStatusRequest) Reset() {
- *x = QueryWriteStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *WriteObjectRequest) Reset() {
+ *x = WriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *QueryWriteStatusRequest) String() string {
+func (x *WriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*QueryWriteStatusRequest) ProtoMessage() {}
+func (*WriteObjectRequest) ProtoMessage() {}
-func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2771,27 +2572,108 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
+// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
}
-func (x *QueryWriteStatusRequest) GetUploadId() string {
- if x != nil {
+func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
return x.UploadId
}
return ""
}
-func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetWriteOffset() int64 {
+ if x != nil {
+ return x.WriteOffset
+ }
+ return 0
+}
+
+func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetFinishWrite() bool {
+ if x != nil {
+ return x.FinishWrite
+ }
+ return false
+}
+
+func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
return x.CommonObjectRequestParams
}
return nil
}
-// Response object for `QueryWriteStatus`.
-type QueryWriteStatusResponse struct {
+type isWriteObjectRequest_FirstMessage interface {
+ isWriteObjectRequest_FirstMessage()
+}
+
+type WriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+ // call to `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+}
+
+type WriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+}
+
+func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+
+func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+
+type isWriteObjectRequest_Data interface {
+ isWriteObjectRequest_Data()
+}
+
+type WriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+}
+
+func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+
+// Response message for WriteObject.
+type WriteObjectResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -2799,29 +2681,28 @@ type QueryWriteStatusResponse struct {
// The response will set one of the following.
//
// Types that are assignable to WriteStatus:
- // *QueryWriteStatusResponse_PersistedSize
- // *QueryWriteStatusResponse_Resource
- WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
+ //
+ // *WriteObjectResponse_PersistedSize
+ // *WriteObjectResponse_Resource
+ WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
}
-func (x *QueryWriteStatusResponse) Reset() {
- *x = QueryWriteStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *WriteObjectResponse) Reset() {
+ *x = WriteObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *QueryWriteStatusResponse) String() string {
+func (x *WriteObjectResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*QueryWriteStatusResponse) ProtoMessage() {}
+func (*WriteObjectResponse) ProtoMessage() {}
-func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2831,181 +2712,94 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
-func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
+// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
}
-func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
+func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
if m != nil {
return m.WriteStatus
}
return nil
}
-func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
- return x.PersistedSize
+func (x *WriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
}
return 0
}
-func (x *QueryWriteStatusResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
+func (x *WriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
return x.Resource
}
return nil
}
-type isQueryWriteStatusResponse_WriteStatus interface {
- isQueryWriteStatusResponse_WriteStatus()
+type isWriteObjectResponse_WriteStatus interface {
+ isWriteObjectResponse_WriteStatus()
}
-type QueryWriteStatusResponse_PersistedSize struct {
+type WriteObjectResponse_PersistedSize struct {
// The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. This is the correct value for the
- // 'write_offset' field to use when resuming the `WriteObject` operation.
- // Only set if the upload has not finalized.
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
}
-type QueryWriteStatusResponse_Resource struct {
+type WriteObjectResponse_Resource struct {
// A resource containing the metadata for the uploaded object. Only set if
// the upload has finalized.
Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
}
-func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
+func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
-func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
+func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
-// Request message for RewriteObject.
-// If the source object is encrypted using a Customer-Supplied Encryption Key
-// the key information must be provided in the copy_source_encryption_algorithm,
-// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes
-// fields. If the destination object should be encrypted the keying information
-// should be provided in the encryption_algorithm, encryption_key_bytes, and
-// encryption_key_sha256_bytes fields of the
-// common_object_request_params.customer_encryption field.
-type RewriteObjectRequest struct {
+// Describes an attempt to append to an object, possibly over multiple requests.
+type AppendObjectSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Immutable. The name of the destination object.
- // See the
- // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
- // Example: `test.txt`
- // The `name` field by itself does not uniquely identify a Cloud Storage
- // object. A Cloud Storage object is uniquely identified by the tuple of
- // (bucket, object, generation).
- DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"`
- // Required. Immutable. The name of the bucket containing the destination
- // object.
- DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"`
- // The name of the Cloud KMS key that will be used to encrypt the destination
- // object. The Cloud KMS key must be located in same location as the object.
- // If the parameter is not specified, the request uses the destination
- // bucket's default encryption key, if any, or else the Google-managed
- // encryption key.
- DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"`
- // Properties of the destination, post-rewrite object.
- // The `name`, `bucket` and `kms_key` fields must not be populated (these
- // values are specified in the `destination_name`, `destination_bucket`, and
- // `destination_kms_key` fields).
- // If `destination` is present it will be used to construct the destination
- // object's metadata; otherwise the destination object's metadata will be
- // copied from the source object.
- Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
- // Required. Name of the bucket in which to find the source object.
- SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"`
- // Required. Name of the source object.
- SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"`
- // If present, selects a specific revision of the source object (as opposed to
- // the latest version, the default).
- SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"`
- // Include this field (from the previous rewrite response) on each rewrite
- // request after the first one, until the rewrite response 'done' flag is
- // true. Calls that provide a rewriteToken can omit all other request fields,
- // but if included those fields must match the values provided in the first
- // rewrite request.
- RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
- // Apply a predefined set of access controls to the destination object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the destination object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the destination object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // Makes the operation conditional on whether the source object's live
- // generation matches the given value.
- IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"`
- // Makes the operation conditional on whether the source object's live
- // generation does not match the given value.
- IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the source object's current
+ // Required. The name of the bucket containing the object to write.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to open for writing.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // Required. The generation number of the object to open for writing.
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the source object's current
+ IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
// metageneration does not match the given value.
- IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"`
- // The maximum number of bytes that will be rewritten per rewrite request.
- // Most callers
- // shouldn't need to specify this parameter - it is primarily in place to
- // support testing. If specified the value must be an integral multiple of
- // 1 MiB (1048576). Also, this only applies to requests where the source and
- // destination span locations and/or storage classes. Finally, this value must
- // not change across rewrite calls else you'll get an error that the
- // `rewriteToken` is invalid.
- MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"`
- // The algorithm used to encrypt the source object, if any. Used if the source
- // object was encrypted with a Customer-Supplied Encryption Key.
- CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"`
- // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt
- // the source object, if it was encrypted with a Customer-Supplied Encryption
- // Key.
- CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"`
- // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used
- // to encrypt the source object, if it was encrypted with a Customer-Supplied
- // Encryption Key.
- CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // The checksums of the complete object. This will be used to validate the
- // destination object after rewriting.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // An optional routing token that influences request routing for the stream.
+ // Must be provided if a BidiWriteObjectRedirectedError is returned.
+ RoutingToken *string `protobuf:"bytes,6,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
+ // An optional write handle returned from a previous BidiWriteObjectResponse
+ // message or a BidiWriteObjectRedirectedError error.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,7,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
}
-func (x *RewriteObjectRequest) Reset() {
- *x = RewriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *AppendObjectSpec) Reset() {
+ *x = AppendObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *RewriteObjectRequest) String() string {
+func (x *AppendObjectSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*RewriteObjectRequest) ProtoMessage() {}
+func (*AppendObjectSpec) ProtoMessage() {}
-func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *AppendObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3015,213 +2809,588 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *RewriteObjectRequest) GetDestinationName() string {
- if x != nil {
- return x.DestinationName
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestinationBucket() string {
- if x != nil {
- return x.DestinationBucket
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestinationKmsKey() string {
- if x != nil {
- return x.DestinationKmsKey
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestination() *Object {
- if x != nil {
- return x.Destination
- }
- return nil
-}
-
-func (x *RewriteObjectRequest) GetSourceBucket() string {
- if x != nil {
- return x.SourceBucket
- }
- return ""
+// Deprecated: Use AppendObjectSpec.ProtoReflect.Descriptor instead.
+func (*AppendObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
}
-func (x *RewriteObjectRequest) GetSourceObject() string {
+func (x *AppendObjectSpec) GetBucket() string {
if x != nil {
- return x.SourceObject
+ return x.Bucket
}
return ""
}
-func (x *RewriteObjectRequest) GetSourceGeneration() int64 {
- if x != nil {
- return x.SourceGeneration
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetRewriteToken() string {
+func (x *AppendObjectSpec) GetObject() string {
if x != nil {
- return x.RewriteToken
+ return x.Object
}
return ""
}
-func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string {
+func (x *AppendObjectSpec) GetGeneration() int64 {
if x != nil {
- return x.DestinationPredefinedAcl
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
+ return x.Generation
}
return 0
}
-func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 {
+func (x *AppendObjectSpec) GetIfMetagenerationMatch() int64 {
if x != nil && x.IfMetagenerationMatch != nil {
return *x.IfMetagenerationMatch
}
return 0
}
-func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+func (x *AppendObjectSpec) GetIfMetagenerationNotMatch() int64 {
if x != nil && x.IfMetagenerationNotMatch != nil {
return *x.IfMetagenerationNotMatch
}
return 0
}
-func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 {
- if x != nil && x.IfSourceGenerationMatch != nil {
- return *x.IfSourceGenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 {
- if x != nil && x.IfSourceGenerationNotMatch != nil {
- return *x.IfSourceGenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 {
- if x != nil && x.IfSourceMetagenerationMatch != nil {
- return *x.IfSourceMetagenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
- if x != nil && x.IfSourceMetagenerationNotMatch != nil {
- return *x.IfSourceMetagenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 {
- if x != nil {
- return x.MaxBytesRewrittenPerCall
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string {
- if x != nil {
- return x.CopySourceEncryptionAlgorithm
+func (x *AppendObjectSpec) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
}
return ""
}
-func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte {
+func (x *AppendObjectSpec) GetWriteHandle() *BidiWriteHandle {
if x != nil {
- return x.CopySourceEncryptionKeyBytes
+ return x.WriteHandle
}
return nil
}
-func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte {
- if x != nil {
- return x.CopySourceEncryptionKeySha256Bytes
+// Request message for BidiWriteObject.
+type BidiWriteObjectRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The first message of each stream should set one of the following.
+ //
+ // Types that are assignable to FirstMessage:
+ //
+ // *BidiWriteObjectRequest_UploadId
+ // *BidiWriteObjectRequest_WriteObjectSpec
+ // *BidiWriteObjectRequest_AppendObjectSpec
+ FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An invalid value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *BidiWriteObjectRequest_ChecksummedData
+ Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+ // don't match the specified checksums the call will fail. May only be
+ // provided in the first request or the
+ // last request (with finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ // closes the stream, the service will send a BidiWriteObjectResponse
+ // containing the current persisted size. The persisted size sent in responses
+ // covers all the bytes the server has persisted thus far and can be used to
+ // decide what data is safe for the client to drop. Note that the object's
+ // current size reported by the BidiWriteObjectResponse may lag behind the
+ // number of bytes written by the client. This field is ignored if
+ // `finish_write` is set to true.
+ StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
+ // Persists data written on the stream, up to and including the current
+ // message, to permanent storage. This option should be used sparingly as it
+ // may reduce performance. Ongoing writes will periodically be persisted on
+ // the server even when `flush` is not set. This field is ignored if
+ // `finish_write` is set to true since there's no need to checkpoint or flush
+ // if this message completes the write.
+ Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+}
+
+func (x *BidiWriteObjectRequest) Reset() {
+ *x = BidiWriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiWriteObjectRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiWriteObjectRequest) ProtoMessage() {}
+
+func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+}
+
+func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
+ return x.UploadId
+ }
+ return ""
+}
+
+func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_AppendObjectSpec); ok {
+ return x.AppendObjectSpec
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
+ if x != nil {
+ return x.WriteOffset
+ }
+ return 0
+}
+
+func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+ if x != nil {
+ return x.StateLookup
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFlush() bool {
+ if x != nil {
+ return x.Flush
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
+ if x != nil {
+ return x.FinishWrite
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+type isBidiWriteObjectRequest_FirstMessage interface {
+ isBidiWriteObjectRequest_FirstMessage()
+}
+
+type BidiWriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+ // call to `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+}
+
+type BidiWriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+}
+
+type BidiWriteObjectRequest_AppendObjectSpec struct {
+ // For appendable uploads. Describes the object to append to.
+ AppendObjectSpec *AppendObjectSpec `protobuf:"bytes,11,opt,name=append_object_spec,json=appendObjectSpec,proto3,oneof"`
+}
+
+func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
+
+func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+
+func (*BidiWriteObjectRequest_AppendObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+
+type isBidiWriteObjectRequest_Data interface {
+ isBidiWriteObjectRequest_Data()
+}
+
+type BidiWriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+}
+
+func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
+
+// Response message for BidiWriteObject.
+type BidiWriteObjectResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *BidiWriteObjectResponse_PersistedSize
+ // *BidiWriteObjectResponse_Resource
+ WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // An optional write handle that will periodically be present in response
+ // messages. Clients should save it for later use in establishing a new stream
+ // if a connection is interrupted.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,3,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
+}
+
+func (x *BidiWriteObjectResponse) Reset() {
+ *x = BidiWriteObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiWriteObjectResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiWriteObjectResponse) ProtoMessage() {}
+
+func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+}
+
+func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
+ }
+ return 0
+}
+
+func (x *BidiWriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle {
+ if x != nil {
+ return x.WriteHandle
+ }
+ return nil
+}
+
+type isBidiWriteObjectResponse_WriteStatus interface {
+ isBidiWriteObjectResponse_WriteStatus()
+}
+
+type BidiWriteObjectResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
+
+type BidiWriteObjectResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+}
+
+func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
+
+func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
+
+// Request message for ListObjects.
+type ListObjectsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the bucket in which to look for objects.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Maximum number of `items` plus `prefixes` to return
+ // in a single page of responses. As duplicate `prefixes` are
+ // omitted, fewer total results may be returned than requested. The service
+ // will use this parameter or 1,000 items, whichever is smaller.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // A previously-returned page token representing part of the larger set of
+ // results to view.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // If set, returns results in a directory-like mode. `items` will contain
+ // only objects whose names, aside from the `prefix`, do not
+ // contain `delimiter`. Objects whose names, aside from the
+ // `prefix`, contain `delimiter` will have their name,
+ // truncated after the `delimiter`, returned in
+ // `prefixes`. Duplicate `prefixes` are omitted.
+ Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
+ // If true, objects that end in exactly one instance of `delimiter`
+ // will have their metadata included in `items` in addition to
+ // `prefixes`.
+ IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
+ // Filter results to objects whose names begin with this prefix.
+ Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ // If `true`, lists all versions of an object as distinct results.
+ // For more information, see
+ // [Object
+ // Versioning](https://cloud.google.com/storage/docs/object-versioning).
+ Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
+ // Mask specifying which fields to read from each result.
+ // If no mask is specified, will default to all fields except items.acl and
+ // items.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically equal
+ // to or after lexicographic_start. If lexicographic_end is also set, the
+ // objects listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically
+ // before lexicographic_end. If lexicographic_start is also set, the objects
+ // listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
+ // Optional. If true, only list all soft-deleted versions of the object.
+ // Soft delete policy is required to set this option.
+ SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
+ // Optional. If true, will also include folders and managed folders (besides
+ // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+ IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
+ // Optional. Filter results to objects and prefixes that match this glob
+ // pattern. See [List Objects Using
+ // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+ // for the full syntax.
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+}
+
+func (x *ListObjectsRequest) Reset() {
+ *x = ListObjectsRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListObjectsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListObjectsRequest) ProtoMessage() {}
+
+func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *ListObjectsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListObjectsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetDelimiter() string {
+ if x != nil {
+ return x.Delimiter
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
+ if x != nil {
+ return x.IncludeTrailingDelimiter
+ }
+ return false
+}
+
+func (x *ListObjectsRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetVersions() bool {
+ if x != nil {
+ return x.Versions
}
- return nil
+ return false
}
-func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.ReadMask
}
return nil
}
-func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ListObjectsRequest) GetLexicographicStart() string {
if x != nil {
- return x.ObjectChecksums
+ return x.LexicographicStart
}
- return nil
+ return ""
}
-// A rewrite response.
-type RewriteResponse struct {
+func (x *ListObjectsRequest) GetLexicographicEnd() string {
+ if x != nil {
+ return x.LexicographicEnd
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetSoftDeleted() bool {
+ if x != nil {
+ return x.SoftDeleted
+ }
+ return false
+}
+
+func (x *ListObjectsRequest) GetIncludeFoldersAsPrefixes() bool {
+ if x != nil {
+ return x.IncludeFoldersAsPrefixes
+ }
+ return false
+}
+
+func (x *ListObjectsRequest) GetMatchGlob() string {
+ if x != nil {
+ return x.MatchGlob
+ }
+ return ""
+}
+
+// Request object for `QueryWriteStatus`.
+type QueryWriteStatusRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The total bytes written so far, which can be used to provide a waiting user
- // with a progress indicator. This property is always present in the response.
- TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"`
- // The total size of the object being copied in bytes. This property is always
- // present in the response.
- ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"`
- // `true` if the copy is finished; otherwise, `false` if
- // the copy is in progress. This property is always present in the response.
- Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
- // A token to use in subsequent requests to continue copying data. This token
- // is present in the response only when there is more data to copy.
- RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
- // A resource containing the metadata for the copied-to object. This property
- // is present in the response only when copying completes.
- Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Required. The name of the resume token for the object whose write status is
+ // being requested.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,2,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *RewriteResponse) Reset() {
- *x = RewriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *QueryWriteStatusRequest) Reset() {
+ *x = QueryWriteStatusRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *RewriteResponse) String() string {
+func (x *QueryWriteStatusRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*RewriteResponse) ProtoMessage() {}
+func (*QueryWriteStatusRequest) ProtoMessage() {}
-func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3231,82 +3400,238 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
-func (*RewriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
+// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
}
-func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
+func (x *QueryWriteStatusRequest) GetUploadId() string {
if x != nil {
- return x.TotalBytesRewritten
+ return x.UploadId
}
- return 0
+ return ""
}
-func (x *RewriteResponse) GetObjectSize() int64 {
+func (x *QueryWriteStatusRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.ObjectSize
+ return x.CommonObjectRequestParams
}
- return 0
+ return nil
}
-func (x *RewriteResponse) GetDone() bool {
+// Response object for `QueryWriteStatus`.
+type QueryWriteStatusResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *QueryWriteStatusResponse_PersistedSize
+ // *QueryWriteStatusResponse_Resource
+ WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"`
+}
+
+func (x *QueryWriteStatusResponse) Reset() {
+ *x = QueryWriteStatusResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryWriteStatusResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryWriteStatusResponse) ProtoMessage() {}
+
+func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
if x != nil {
- return x.Done
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return false
+ return mi.MessageOf(x)
}
-func (x *RewriteResponse) GetRewriteToken() string {
- if x != nil {
- return x.RewriteToken
+// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
+func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
+}
+
+func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
}
- return ""
+ return nil
}
-func (x *RewriteResponse) GetResource() *Object {
- if x != nil {
+func (x *QueryWriteStatusResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_PersistedSize); ok {
+ return x.PersistedSize
+ }
+ return 0
+}
+
+func (x *QueryWriteStatusResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*QueryWriteStatusResponse_Resource); ok {
return x.Resource
}
return nil
}
-// Request message StartResumableWrite.
-type StartResumableWriteRequest struct {
+type isQueryWriteStatusResponse_WriteStatus interface {
+ isQueryWriteStatusResponse_WriteStatus()
+}
+
+type QueryWriteStatusResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. This is the correct value for the
+ // 'write_offset' field to use when resuming the `WriteObject` operation.
+ // Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
+
+type QueryWriteStatusResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+}
+
+func (*QueryWriteStatusResponse_PersistedSize) isQueryWriteStatusResponse_WriteStatus() {}
+
+func (*QueryWriteStatusResponse_Resource) isQueryWriteStatusResponse_WriteStatus() {}
+
+// Request message for RewriteObject.
+// If the source object is encrypted using a Customer-Supplied Encryption Key
+// the key information must be provided in the copy_source_encryption_algorithm,
+// copy_source_encryption_key_bytes, and copy_source_encryption_key_sha256_bytes
+// fields. If the destination object should be encrypted the keying information
+// should be provided in the encryption_algorithm, encryption_key_bytes, and
+// encryption_key_sha256_bytes fields of the
+// common_object_request_params.customer_encryption field.
+type RewriteObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The destination bucket, object, and metadata, as well as any
- // preconditions.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"`
+ // Required. Immutable. The name of the destination object.
+ // See the
+ // [Naming Guidelines](https://cloud.google.com/storage/docs/objects#naming).
+ // Example: `test.txt`
+ // The `name` field by itself does not uniquely identify a Cloud Storage
+ // object. A Cloud Storage object is uniquely identified by the tuple of
+ // (bucket, object, generation).
+ DestinationName string `protobuf:"bytes,24,opt,name=destination_name,json=destinationName,proto3" json:"destination_name,omitempty"`
+ // Required. Immutable. The name of the bucket containing the destination
+ // object.
+ DestinationBucket string `protobuf:"bytes,25,opt,name=destination_bucket,json=destinationBucket,proto3" json:"destination_bucket,omitempty"`
+ // The name of the Cloud KMS key that will be used to encrypt the destination
+ // object. The Cloud KMS key must be located in same location as the object.
+ // If the parameter is not specified, the request uses the destination
+ // bucket's default encryption key, if any, or else the Google-managed
+ // encryption key.
+ DestinationKmsKey string `protobuf:"bytes,27,opt,name=destination_kms_key,json=destinationKmsKey,proto3" json:"destination_kms_key,omitempty"`
+ // Properties of the destination, post-rewrite object.
+ // The `name`, `bucket` and `kms_key` fields must not be populated (these
+ // values are specified in the `destination_name`, `destination_bucket`, and
+ // `destination_kms_key` fields).
+ // If `destination` is present it will be used to construct the destination
+ // object's metadata; otherwise the destination object's metadata will be
+ // copied from the source object.
+ Destination *Object `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
+ // Required. Name of the bucket in which to find the source object.
+ SourceBucket string `protobuf:"bytes,2,opt,name=source_bucket,json=sourceBucket,proto3" json:"source_bucket,omitempty"`
+ // Required. Name of the source object.
+ SourceObject string `protobuf:"bytes,3,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"`
+ // If present, selects a specific revision of the source object (as opposed to
+ // the latest version, the default).
+ SourceGeneration int64 `protobuf:"varint,4,opt,name=source_generation,json=sourceGeneration,proto3" json:"source_generation,omitempty"`
+ // Include this field (from the previous rewrite response) on each rewrite
+ // request after the first one, until the rewrite response 'done' flag is
+ // true. Calls that provide a rewriteToken can omit all other request fields,
+ // but if included those fields must match the values provided in the first
+ // rewrite request.
+ RewriteToken string `protobuf:"bytes,5,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
+ // Apply a predefined set of access controls to the destination object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ DestinationPredefinedAcl string `protobuf:"bytes,28,opt,name=destination_predefined_acl,json=destinationPredefinedAcl,proto3" json:"destination_predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,7,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,8,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the destination object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,9,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the destination object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // Makes the operation conditional on whether the source object's live
+ // generation matches the given value.
+ IfSourceGenerationMatch *int64 `protobuf:"varint,11,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"`
+ // Makes the operation conditional on whether the source object's live
+ // generation does not match the given value.
+ IfSourceGenerationNotMatch *int64 `protobuf:"varint,12,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the source object's current
+ // metageneration matches the given value.
+ IfSourceMetagenerationMatch *int64 `protobuf:"varint,13,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the source object's current
+ // metageneration does not match the given value.
+ IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,14,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"`
+ // The maximum number of bytes that will be rewritten per rewrite request.
+ // Most callers
+ // shouldn't need to specify this parameter - it is primarily in place to
+ // support testing. If specified the value must be an integral multiple of
+ // 1 MiB (1048576). Also, this only applies to requests where the source and
+ // destination span locations and/or storage classes. Finally, this value must
+ // not change across rewrite calls else you'll get an error that the
+ // `rewriteToken` is invalid.
+ MaxBytesRewrittenPerCall int64 `protobuf:"varint,15,opt,name=max_bytes_rewritten_per_call,json=maxBytesRewrittenPerCall,proto3" json:"max_bytes_rewritten_per_call,omitempty"`
+ // The algorithm used to encrypt the source object, if any. Used if the source
+ // object was encrypted with a Customer-Supplied Encryption Key.
+ CopySourceEncryptionAlgorithm string `protobuf:"bytes,16,opt,name=copy_source_encryption_algorithm,json=copySourceEncryptionAlgorithm,proto3" json:"copy_source_encryption_algorithm,omitempty"`
+ // The raw bytes (not base64-encoded) AES-256 encryption key used to encrypt
+ // the source object, if it was encrypted with a Customer-Supplied Encryption
+ // Key.
+ CopySourceEncryptionKeyBytes []byte `protobuf:"bytes,21,opt,name=copy_source_encryption_key_bytes,json=copySourceEncryptionKeyBytes,proto3" json:"copy_source_encryption_key_bytes,omitempty"`
+ // The raw bytes (not base64-encoded) SHA256 hash of the encryption key used
+ // to encrypt the source object, if it was encrypted with a Customer-Supplied
+ // Encryption Key.
+ CopySourceEncryptionKeySha256Bytes []byte `protobuf:"bytes,22,opt,name=copy_source_encryption_key_sha256_bytes,json=copySourceEncryptionKeySha256Bytes,proto3" json:"copy_source_encryption_key_sha256_bytes,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
// The checksums of the complete object. This will be used to validate the
- // uploaded object. For each upload, object_checksums can be provided with
- // either StartResumableWriteRequest or the WriteObjectRequest with
- // finish_write set to `true`.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // destination object after rewriting.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *StartResumableWriteRequest) Reset() {
- *x = StartResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *RewriteObjectRequest) Reset() {
+ *x = RewriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *StartResumableWriteRequest) String() string {
+func (x *RewriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*StartResumableWriteRequest) ProtoMessage() {}
+func (*RewriteObjectRequest) ProtoMessage() {}
-func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3316,243 +3641,211 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
-func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
}
-func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
+func (x *RewriteObjectRequest) GetDestinationName() string {
if x != nil {
- return x.WriteObjectSpec
+ return x.DestinationName
}
- return nil
+ return ""
}
-func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *RewriteObjectRequest) GetDestinationBucket() string {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.DestinationBucket
}
- return nil
+ return ""
}
-func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *RewriteObjectRequest) GetDestinationKmsKey() string {
if x != nil {
- return x.ObjectChecksums
+ return x.DestinationKmsKey
}
- return nil
-}
-
-// Response object for `StartResumableWrite`.
-type StartResumableWriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The upload_id of the newly started resumable write operation. This
- // value should be copied into the `WriteObjectRequest.upload_id` field.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
+ return ""
}
-func (x *StartResumableWriteResponse) Reset() {
- *x = StartResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *RewriteObjectRequest) GetDestination() *Object {
+ if x != nil {
+ return x.Destination
}
+ return nil
}
-func (x *StartResumableWriteResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *RewriteObjectRequest) GetSourceBucket() string {
+ if x != nil {
+ return x.SourceBucket
+ }
+ return ""
}
-func (*StartResumableWriteResponse) ProtoMessage() {}
-
-func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteObjectRequest) GetSourceObject() string {
+ if x != nil {
+ return x.SourceObject
}
- return mi.MessageOf(x)
+ return ""
}
-// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
-func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+func (x *RewriteObjectRequest) GetSourceGeneration() int64 {
+ if x != nil {
+ return x.SourceGeneration
+ }
+ return 0
}
-func (x *StartResumableWriteResponse) GetUploadId() string {
+func (x *RewriteObjectRequest) GetRewriteToken() string {
if x != nil {
- return x.UploadId
+ return x.RewriteToken
}
return ""
}
-// Request message for UpdateObject.
-type UpdateObjectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The object to update.
- // The object's bucket and name fields are used to identify the object to
- // update. If present, the object's generation field selects a specific
- // revision of this object whose metadata should be updated. Otherwise,
- // assumes the live version of the object.
- Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Required. List of fields to be updated.
- //
- // To specify ALL fields, equivalent to the JSON API's "update" function,
- // specify a single field with the value `*`. Note: not recommended. If a new
- // field is introduced at a later time, an older client updating with the `*`
- // may accidentally reset the new field's value.
- //
- // Not specifying any fields is an error.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string {
+ if x != nil {
+ return x.DestinationPredefinedAcl
+ }
+ return ""
}
-func (x *UpdateObjectRequest) Reset() {
- *x = UpdateObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
+ return 0
}
-func (x *UpdateObjectRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (*UpdateObjectRequest) ProtoMessage() {}
-
-func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
-func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
}
-func (x *UpdateObjectRequest) GetObject() *Object {
- if x != nil {
- return x.Object
+func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 {
+ if x != nil && x.IfSourceGenerationMatch != nil {
+ return *x.IfSourceGenerationMatch
}
- return nil
+ return 0
}
-func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 {
+ if x != nil && x.IfSourceGenerationNotMatch != nil {
+ return *x.IfSourceGenerationNotMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
+func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationMatch != nil {
+ return *x.IfSourceMetagenerationMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
+func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationNotMatch != nil {
+ return *x.IfSourceMetagenerationNotMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
+func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 {
+ if x != nil {
+ return x.MaxBytesRewrittenPerCall
}
return 0
}
-func (x *UpdateObjectRequest) GetPredefinedAcl() string {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string {
if x != nil {
- return x.PredefinedAcl
+ return x.CopySourceEncryptionAlgorithm
}
return ""
}
-func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte {
if x != nil {
- return x.UpdateMask
+ return x.CopySourceEncryptionKeyBytes
}
return nil
}
-func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte {
+ if x != nil {
+ return x.CopySourceEncryptionKeySha256Bytes
+ }
+ return nil
+}
+
+func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.ObjectChecksums
}
return nil
}
-// Request message for GetServiceAccount.
-type GetServiceAccountRequest struct {
+// A rewrite response.
+type RewriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Project ID, in the format of "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
+ // The total bytes written so far, which can be used to provide a waiting user
+ // with a progress indicator. This property is always present in the response.
+ TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"`
+ // The total size of the object being copied in bytes. This property is always
+ // present in the response.
+ ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"`
+ // `true` if the copy is finished; otherwise, `false` if
+ // the copy is in progress. This property is always present in the response.
+ Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ // A token to use in subsequent requests to continue copying data. This token
+ // is present in the response only when there is more data to copy.
+ RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
+ // A resource containing the metadata for the copied-to object. This property
+ // is present in the response only when copying completes.
+ Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"`
}
-func (x *GetServiceAccountRequest) Reset() {
- *x = GetServiceAccountRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *RewriteResponse) Reset() {
+ *x = RewriteResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *GetServiceAccountRequest) String() string {
+func (x *RewriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetServiceAccountRequest) ProtoMessage() {}
+func (*RewriteResponse) ProtoMessage() {}
-func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3562,109 +3855,124 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead.
-func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
+// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
+func (*RewriteResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
}
-func (x *GetServiceAccountRequest) GetProject() string {
+func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
if x != nil {
- return x.Project
+ return x.TotalBytesRewritten
}
- return ""
-}
-
-// Request message for CreateHmacKey.
-type CreateHmacKeyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The project that the HMAC-owning service account lives in, in the
- // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
- // project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // Required. The service account to create the HMAC for.
- ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ return 0
}
-func (x *CreateHmacKeyRequest) Reset() {
- *x = CreateHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *RewriteResponse) GetObjectSize() int64 {
+ if x != nil {
+ return x.ObjectSize
}
+ return 0
}
-func (x *CreateHmacKeyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateHmacKeyRequest) ProtoMessage() {}
-
-func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteResponse) GetDone() bool {
+ if x != nil {
+ return x.Done
}
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
+ return false
}
-func (x *CreateHmacKeyRequest) GetProject() string {
+func (x *RewriteResponse) GetRewriteToken() string {
if x != nil {
- return x.Project
+ return x.RewriteToken
}
return ""
}
-func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+func (x *RewriteResponse) GetResource() *Object {
if x != nil {
- return x.ServiceAccountEmail
+ return x.Resource
}
- return ""
+ return nil
}
-// Create hmac response. The only time the secret for an HMAC will be returned.
-type CreateHmacKeyResponse struct {
+// Request message for MoveObject.
+type MoveObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Key metadata.
- Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // HMAC key secret material.
- // In raw bytes format (not base64-encoded).
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"`
-}
-
-func (x *CreateHmacKeyResponse) Reset() {
- *x = CreateHmacKeyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. Name of the source object.
+ SourceObject string `protobuf:"bytes,2,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"`
+ // Required. Name of the destination object.
+ DestinationObject string `protobuf:"bytes,3,opt,name=destination_object,json=destinationObject,proto3" json:"destination_object,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current generation matches the given value. `if_source_generation_match`
+ // and `if_source_generation_not_match` conditions are mutually exclusive:
+ // it's an error for both of them to be set in the request.
+ IfSourceGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current generation does not match the given value.
+ // `if_source_generation_match` and `if_source_generation_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current metageneration matches the given value.
+ // `if_source_metageneration_match` and `if_source_metageneration_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current metageneration does not match the given value.
+ // `if_source_metageneration_match` and `if_source_metageneration_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current generation matches the given value. Setting to 0 makes the
+ // operation succeed only if there are no live versions of the object.
+ // `if_generation_match` and `if_generation_not_match` conditions are mutually
+ // exclusive: it's an error for both of them to be set in the request.
+ IfGenerationMatch *int64 `protobuf:"varint,8,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current generation does not match the given value. If no live
+ // object exists, the precondition fails. Setting to 0 makes the operation
+ // succeed only if there is a live version of the object.
+ // `if_generation_match` and `if_generation_not_match` conditions are mutually
+ // exclusive: it's an error for both of them to be set in the request.
+ IfGenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current metageneration matches the given value.
+ // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+ // mutually exclusive: it's an error for both of them to be set in the
+ // request.
+ IfMetagenerationMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current metageneration does not match the given value.
+ // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+ // mutually exclusive: it's an error for both of them to be set in the
+ // request.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,11,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+}
+
+func (x *MoveObjectRequest) Reset() {
+ *x = MoveObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *CreateHmacKeyResponse) String() string {
+func (x *MoveObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateHmacKeyResponse) ProtoMessage() {}
+func (*MoveObjectRequest) ProtoMessage() {}
-func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *MoveObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3674,117 +3982,122 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
+// Deprecated: Use MoveObjectRequest.ProtoReflect.Descriptor instead.
+func (*MoveObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
}
-func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata {
+func (x *MoveObjectRequest) GetBucket() string {
if x != nil {
- return x.Metadata
+ return x.Bucket
}
- return nil
+ return ""
}
-func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte {
+func (x *MoveObjectRequest) GetSourceObject() string {
if x != nil {
- return x.SecretKeyBytes
+ return x.SourceObject
}
- return nil
+ return ""
}
-// Request object to delete a given HMAC key.
-type DeleteHmacKeyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+func (x *MoveObjectRequest) GetDestinationObject() string {
+ if x != nil {
+ return x.DestinationObject
+ }
+ return ""
+}
- // Required. The identifying key for the HMAC to delete.
- AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Required. The project that owns the HMAC key, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"`
+func (x *MoveObjectRequest) GetIfSourceGenerationMatch() int64 {
+ if x != nil && x.IfSourceGenerationMatch != nil {
+ return *x.IfSourceGenerationMatch
+ }
+ return 0
}
-func (x *DeleteHmacKeyRequest) Reset() {
- *x = DeleteHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *MoveObjectRequest) GetIfSourceGenerationNotMatch() int64 {
+ if x != nil && x.IfSourceGenerationNotMatch != nil {
+ return *x.IfSourceGenerationNotMatch
}
+ return 0
}
-func (x *DeleteHmacKeyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *MoveObjectRequest) GetIfSourceMetagenerationMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationMatch != nil {
+ return *x.IfSourceMetagenerationMatch
+ }
+ return 0
}
-func (*DeleteHmacKeyRequest) ProtoMessage() {}
+func (x *MoveObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationNotMatch != nil {
+ return *x.IfSourceMetagenerationNotMatch
+ }
+ return 0
+}
-func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *MoveObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
+func (x *MoveObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (x *DeleteHmacKeyRequest) GetAccessId() string {
- if x != nil {
- return x.AccessId
+func (x *MoveObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
}
- return ""
+ return 0
}
-func (x *DeleteHmacKeyRequest) GetProject() string {
- if x != nil {
- return x.Project
+func (x *MoveObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return ""
+ return 0
}
-// Request object to get metadata on a given HMAC key.
-type GetHmacKeyRequest struct {
+// Request message StartResumableWrite.
+type StartResumableWriteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The identifying key for the HMAC to delete.
- AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Required. The project the HMAC key lies in, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"`
+ // Required. Contains the information necessary to start a resumable write.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"`
+ // A set of parameters common to Storage API requests related to an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The checksums of the complete object. This is used to validate the
+ // uploaded object. For each upload, `object_checksums` can be provided when
+ // initiating a resumable upload with`StartResumableWriteRequest` or when
+ // completing a write with `WriteObjectRequest` with
+ // `finish_write` set to `true`.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *GetHmacKeyRequest) Reset() {
- *x = GetHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *StartResumableWriteRequest) Reset() {
+ *x = StartResumableWriteRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *GetHmacKeyRequest) String() string {
+func (x *StartResumableWriteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetHmacKeyRequest) ProtoMessage() {}
+func (*StartResumableWriteRequest) ProtoMessage() {}
-func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3794,63 +4107,62 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
+func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
}
-func (x *GetHmacKeyRequest) GetAccessId() string {
+func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
if x != nil {
- return x.AccessId
+ return x.WriteObjectSpec
}
- return ""
+ return nil
}
-func (x *GetHmacKeyRequest) GetProject() string {
+func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.Project
+ return x.CommonObjectRequestParams
}
- return ""
+ return nil
+}
+
+func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
}
-// Request to fetch a list of HMAC keys under a given project.
-type ListHmacKeysRequest struct {
+// Response object for `StartResumableWrite`.
+type StartResumableWriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The project to list HMAC keys for, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // The maximum number of keys to return.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously returned token from ListHmacKeysResponse to get the next page.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, filters to only return HMAC keys for specified service account.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // If set, return deleted keys that have not yet been wiped out.
- ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"`
+ // A unique identifier for the initiated resumable write operation.
+ // As the ID grants write access, you should keep it confidential during
+ // the upload to prevent unauthorized access and data tampering during your
+ // upload. This ID should be included in subsequent `WriteObject` requests to
+ // upload the object data.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
}
-func (x *ListHmacKeysRequest) Reset() {
- *x = ListHmacKeysRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *StartResumableWriteResponse) Reset() {
+ *x = StartResumableWriteResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListHmacKeysRequest) String() string {
+func (x *StartResumableWriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListHmacKeysRequest) ProtoMessage() {}
+func (*StartResumableWriteResponse) ProtoMessage() {}
-func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3860,77 +4172,78 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead.
-func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
-}
-
-func (x *ListHmacKeysRequest) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *ListHmacKeysRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListHmacKeysRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
+// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
+func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
}
-func (x *ListHmacKeysRequest) GetServiceAccountEmail() string {
+func (x *StartResumableWriteResponse) GetUploadId() string {
if x != nil {
- return x.ServiceAccountEmail
+ return x.UploadId
}
return ""
}
-func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool {
- if x != nil {
- return x.ShowDeletedKeys
- }
- return false
-}
-
-// Hmac key list response with next page information.
-type ListHmacKeysResponse struct {
+// Request message for UpdateObject.
+type UpdateObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The list of items.
- HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"`
- // The continuation token, used to page through large result sets. Provide
- // this value in a subsequent request to return the next page of results.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Required. The object to update.
+ // The object's bucket and name fields are used to identify the object to
+ // update. If present, the object's generation field selects a specific
+ // revision of this object whose metadata should be updated. Otherwise,
+ // assumes the live version of the object.
+ Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Required. List of fields to be updated.
+ //
+ // To specify ALL fields, equivalent to the JSON API's "update" function,
+ // specify a single field with the value `*`. Note: not recommended. If a new
+ // field is introduced at a later time, an older client updating with the `*`
+ // may accidentally reset the new field's value.
+ //
+ // Not specifying any fields is an error.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
-func (x *ListHmacKeysResponse) Reset() {
- *x = ListHmacKeysResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *UpdateObjectRequest) Reset() {
+ *x = UpdateObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListHmacKeysResponse) String() string {
+func (x *UpdateObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListHmacKeysResponse) ProtoMessage() {}
+func (*UpdateObjectRequest) ProtoMessage() {}
-func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3940,87 +4253,63 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead.
-func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
+// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
+func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
}
-func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata {
+func (x *UpdateObjectRequest) GetObject() *Object {
if x != nil {
- return x.HmacKeys
+ return x.Object
}
return nil
}
-func (x *ListHmacKeysResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
+func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return ""
+ return 0
}
-// Request object to update an HMAC key state.
-// HmacKeyMetadata.state is required and the only writable field in
-// UpdateHmacKey operation. Specifying fields other than state will result in an
-// error.
-type UpdateHmacKeyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The HMAC key to update.
- // If present, the hmac_key's `id` field will be used to identify the key.
- // Otherwise, the hmac_key's access_id and project fields will be used to
- // identify the key.
- HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"`
- // Update mask for hmac_key.
- // Not specifying any fields will mean only the `state` field is updated to
- // the value specified in `hmac_key`.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateHmacKeyRequest) Reset() {
- *x = UpdateHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
}
+ return 0
}
-func (x *UpdateHmacKeyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
}
-func (*UpdateHmacKeyRequest) ProtoMessage() {}
-
-func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
+func (x *UpdateObjectRequest) GetPredefinedAcl() string {
+ if x != nil {
+ return x.PredefinedAcl
+ }
+ return ""
}
-func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata {
+func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
- return x.HmacKey
+ return x.UpdateMask
}
return nil
}
-func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.UpdateMask
+ return x.CommonObjectRequestParams
}
return nil
}
@@ -4044,11 +4333,9 @@ type CommonObjectRequestParams struct {
func (x *CommonObjectRequestParams) Reset() {
*x = CommonObjectRequestParams{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CommonObjectRequestParams) String() string {
@@ -4059,7 +4346,7 @@ func (*CommonObjectRequestParams) ProtoMessage() {}
func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4104,11 +4391,9 @@ type ServiceConstants struct {
func (x *ServiceConstants) Reset() {
*x = ServiceConstants{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceConstants) String() string {
@@ -4119,7 +4404,7 @@ func (*ServiceConstants) ProtoMessage() {}
func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4248,11 +4533,16 @@ type Bucket struct {
// Reserved for future use.
SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"`
// Configuration that, if present, specifies the data placement for a
- // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region].
+ // [https://cloud.google.com/storage/docs/locations#location-dr][configurable
+ // dual-region].
CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"`
// The bucket's Autoclass configuration. If there is no configuration, the
// Autoclass feature will be disabled and have no effect on the bucket.
Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"`
+ // Optional. The bucket's hierarchical namespace configuration. If there is no
+ // configuration, the hierarchical namespace feature will be disabled and have
+ // no effect on the bucket.
+ HierarchicalNamespace *Bucket_HierarchicalNamespace `protobuf:"bytes,32,opt,name=hierarchical_namespace,json=hierarchicalNamespace,proto3" json:"hierarchical_namespace,omitempty"`
// Optional. The bucket's soft delete policy. The soft delete policy prevents
// soft-deleted objects from being permanently deleted.
SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"`
@@ -4260,11 +4550,9 @@ type Bucket struct {
func (x *Bucket) Reset() {
*x = Bucket{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket) String() string {
@@ -4275,7 +4563,7 @@ func (*Bucket) ProtoMessage() {}
func (x *Bucket) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4486,6 +4774,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass {
return nil
}
+func (x *Bucket) GetHierarchicalNamespace() *Bucket_HierarchicalNamespace {
+ if x != nil {
+ return x.HierarchicalNamespace
+ }
+ return nil
+}
+
func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy {
if x != nil {
return x.SoftDeletePolicy
@@ -4542,11 +4837,9 @@ type BucketAccessControl struct {
func (x *BucketAccessControl) Reset() {
*x = BucketAccessControl{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BucketAccessControl) String() string {
@@ -4557,7 +4850,7 @@ func (*BucketAccessControl) ProtoMessage() {}
func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4650,11 +4943,9 @@ type ChecksummedData struct {
func (x *ChecksummedData) Reset() {
*x = ChecksummedData{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ChecksummedData) String() string {
@@ -4665,7 +4956,7 @@ func (*ChecksummedData) ProtoMessage() {}
func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4710,18 +5001,16 @@ type ObjectChecksums struct {
// [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and
// ETags: Best Practices].
// Not all objects will provide an MD5 hash. For example, composite objects
- // provide only crc32c hashes.
- // This value is equivalent to running `cat object.txt | openssl md5 -binary`
+ // provide only crc32c hashes. This value is equivalent to running `cat
+ // object.txt | openssl md5 -binary`
Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"`
}
func (x *ObjectChecksums) Reset() {
*x = ObjectChecksums{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ObjectChecksums) String() string {
@@ -4732,7 +5021,7 @@ func (*ObjectChecksums) ProtoMessage() {}
func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4747,248 +5036,18 @@ func (*ObjectChecksums) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
}
-func (x *ObjectChecksums) GetCrc32C() uint32 {
- if x != nil && x.Crc32C != nil {
- return *x.Crc32C
- }
- return 0
-}
-
-func (x *ObjectChecksums) GetMd5Hash() []byte {
- if x != nil {
- return x.Md5Hash
- }
- return nil
-}
-
-// Hmac Key Metadata, which includes all information other than the secret.
-type HmacKeyMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Immutable. Resource name ID of the key in the format
- // {projectIdentifier}/{accessId}.
- // {projectIdentifier} can be the project ID or project number.
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // Immutable. Globally unique id for keys.
- AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Immutable. Identifies the project that owns the service account of the
- // specified HMAC key, in the format "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
- // Output only. Email of the service account the key authenticates as.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // State of the key. One of ACTIVE, INACTIVE, or DELETED.
- // Writable, can be updated by UpdateHmacKey operation.
- State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
- // Output only. The creation time of the HMAC key.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The last modification time of the HMAC key metadata.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // The etag of the HMAC key.
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *HmacKeyMetadata) Reset() {
- *x = HmacKeyMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HmacKeyMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HmacKeyMetadata) ProtoMessage() {}
-
-func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
-func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
-}
-
-func (x *HmacKeyMetadata) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetAccessId() string {
- if x != nil {
- return x.AccessId
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
- if x != nil {
- return x.ServiceAccountEmail
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetState() string {
- if x != nil {
- return x.State
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-// A directive to publish Pub/Sub notifications upon changes to a bucket.
-type NotificationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The resource name of this NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- // The `{project}` portion may be `_` for globally unique buckets.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The Pub/Sub topic to which this subscription publishes. Formatted
- // as:
- // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
- Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
- // The etag of the NotificationConfig.
- // If included in the metadata of GetNotificationConfigRequest, the operation
- // will only be performed if the etag matches that of the NotificationConfig.
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
- // If present, only send notifications about listed event types. If
- // empty, sent notifications for all event types.
- EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
- // A list of additional attributes to attach to each Pub/Sub
- // message published for this NotificationConfig.
- CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // If present, only apply this NotificationConfig to object names that
- // begin with this prefix.
- ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
- // Required. The desired content of the Payload.
- PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
-}
-
-func (x *NotificationConfig) Reset() {
- *x = NotificationConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NotificationConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*NotificationConfig) ProtoMessage() {}
-
-func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
-func (*NotificationConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
-}
-
-func (x *NotificationConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *NotificationConfig) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEventTypes() []string {
- if x != nil {
- return x.EventTypes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetCustomAttributes() map[string]string {
- if x != nil {
- return x.CustomAttributes
- }
- return nil
-}
-
-func (x *NotificationConfig) GetObjectNamePrefix() string {
- if x != nil {
- return x.ObjectNamePrefix
+func (x *ObjectChecksums) GetCrc32C() uint32 {
+ if x != nil && x.Crc32C != nil {
+ return *x.Crc32C
}
- return ""
+ return 0
}
-func (x *NotificationConfig) GetPayloadFormat() string {
+func (x *ObjectChecksums) GetMd5Hash() []byte {
if x != nil {
- return x.PayloadFormat
+ return x.Md5Hash
}
- return ""
+ return nil
}
// Describes the Customer-Supplied Encryption Key mechanism used to store an
@@ -5007,11 +5066,9 @@ type CustomerEncryption struct {
func (x *CustomerEncryption) Reset() {
*x = CustomerEncryption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CustomerEncryption) String() string {
@@ -5021,8 +5078,8 @@ func (x *CustomerEncryption) String() string {
func (*CustomerEncryption) ProtoMessage() {}
func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5034,7 +5091,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead.
func (*CustomerEncryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
}
func (x *CustomerEncryption) GetEncryptionAlgorithm() string {
@@ -5075,6 +5132,10 @@ type Object struct {
// Immutable. The content generation of this object. Used for object
// versioning.
Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Output only. Restore token used to differentiate deleted objects with the
+ // same name and generation. This field is output only, and only set for
+ // deleted objects in HNS buckets.
+ RestoreToken *string `protobuf:"bytes,35,opt,name=restore_token,json=restoreToken,proto3,oneof" json:"restore_token,omitempty"`
// Output only. The version of the metadata for this generation of this
// object. Used for preconditions and for detecting changes in metadata. A
// metageneration number is only meaningful in the context of a particular
@@ -5106,6 +5167,8 @@ type Object struct {
// Output only. If this object is noncurrent, this is the time when the object
// became noncurrent.
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
+ // Output only. The time when the object was finalized.
+ FinalizeTime *timestamppb.Timestamp `protobuf:"bytes,36,opt,name=finalize_time,json=finalizeTime,proto3" json:"finalize_time,omitempty"`
// Content-Type of the object data, matching
// [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5].
// If an object is stored without a Content-Type, it is served as
@@ -5117,7 +5180,10 @@ type Object struct {
// Components are accumulated by compose operations.
ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"`
// Output only. Hashes for the data part of this object. This field is used
- // for output only and will be silently ignored if provided in requests.
+ // for output only and will be silently ignored if provided in requests. The
+ // checksums of the complete object regardless of data range. If the object is
+ // downloaded in full, the client should compute one of these checksums over
+ // the downloaded object and compare it against the value provided here.
Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"`
// Output only. The modification time of the object metadata.
// Set initially to object creation time and then updated whenever any
@@ -5168,15 +5234,23 @@ type Object struct {
CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"`
// A user-specified timestamp set on an object.
CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"`
+ // Output only. This is the time when the object became soft-deleted.
+ //
+ // Soft-deleted objects are only accessible if a soft_delete_policy is
+ // enabled. Also see hard_delete_time.
+ SoftDeleteTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=soft_delete_time,json=softDeleteTime,proto3,oneof" json:"soft_delete_time,omitempty"`
+ // Output only. The time when the object will be permanently deleted.
+ //
+ // Only set when an object becomes soft-deleted with a soft_delete_policy.
+ // Otherwise, the object will not be accessible.
+ HardDeleteTime *timestamppb.Timestamp `protobuf:"bytes,29,opt,name=hard_delete_time,json=hardDeleteTime,proto3,oneof" json:"hard_delete_time,omitempty"`
}
func (x *Object) Reset() {
*x = Object{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Object) String() string {
@@ -5186,8 +5260,8 @@ func (x *Object) String() string {
func (*Object) ProtoMessage() {}
func (x *Object) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5199,7 +5273,7 @@ func (x *Object) ProtoReflect() protoreflect.Message {
// Deprecated: Use Object.ProtoReflect.Descriptor instead.
func (*Object) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
}
func (x *Object) GetName() string {
@@ -5230,6 +5304,13 @@ func (x *Object) GetGeneration() int64 {
return 0
}
+func (x *Object) GetRestoreToken() string {
+ if x != nil && x.RestoreToken != nil {
+ return *x.RestoreToken
+ }
+ return ""
+}
+
func (x *Object) GetMetageneration() int64 {
if x != nil {
return x.Metageneration
@@ -5293,6 +5374,13 @@ func (x *Object) GetDeleteTime() *timestamppb.Timestamp {
return nil
}
+func (x *Object) GetFinalizeTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.FinalizeTime
+ }
+ return nil
+}
+
func (x *Object) GetContentType() string {
if x != nil {
return x.ContentType
@@ -5391,13 +5479,30 @@ func (x *Object) GetCustomTime() *timestamppb.Timestamp {
return nil
}
+func (x *Object) GetSoftDeleteTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.SoftDeleteTime
+ }
+ return nil
+}
+
+func (x *Object) GetHardDeleteTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.HardDeleteTime
+ }
+ return nil
+}
+
// An access-control entry.
type ObjectAccessControl struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The access permission for the entity.
+ // The access permission for the entity. One of the following values:
+ // * `READER`
+ // * `WRITER`
+ // * `OWNER`
Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
// The ID of the access-control entry.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
@@ -5440,11 +5545,9 @@ type ObjectAccessControl struct {
func (x *ObjectAccessControl) Reset() {
*x = ObjectAccessControl{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ObjectAccessControl) String() string {
@@ -5454,8 +5557,8 @@ func (x *ObjectAccessControl) String() string {
func (*ObjectAccessControl) ProtoMessage() {}
func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5467,7 +5570,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead.
func (*ObjectAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
}
func (x *ObjectAccessControl) GetRole() string {
@@ -5551,11 +5654,9 @@ type ListObjectsResponse struct {
func (x *ListObjectsResponse) Reset() {
*x = ListObjectsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListObjectsResponse) String() string {
@@ -5565,8 +5666,8 @@ func (x *ListObjectsResponse) String() string {
func (*ListObjectsResponse) ProtoMessage() {}
func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5578,7 +5679,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead.
func (*ListObjectsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
}
func (x *ListObjectsResponse) GetObjects() []*Object {
@@ -5616,11 +5717,9 @@ type ProjectTeam struct {
func (x *ProjectTeam) Reset() {
*x = ProjectTeam{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProjectTeam) String() string {
@@ -5630,8 +5729,8 @@ func (x *ProjectTeam) String() string {
func (*ProjectTeam) ProtoMessage() {}
func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5643,7 +5742,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead.
func (*ProjectTeam) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
}
func (x *ProjectTeam) GetProjectNumber() string {
@@ -5660,57 +5759,6 @@ func (x *ProjectTeam) GetTeam() string {
return ""
}
-// A service account, owned by Cloud Storage, which may be used when taking
-// action on behalf of a given project, for example to publish Pub/Sub
-// notifications or to retrieve security keys.
-type ServiceAccount struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The ID of the notification.
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
-}
-
-func (x *ServiceAccount) Reset() {
- *x = ServiceAccount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServiceAccount) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServiceAccount) ProtoMessage() {}
-
-func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
-func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
-}
-
-func (x *ServiceAccount) GetEmailAddress() string {
- if x != nil {
- return x.EmailAddress
- }
- return ""
-}
-
// The owner of a specific resource.
type Owner struct {
state protoimpl.MessageState
@@ -5725,11 +5773,9 @@ type Owner struct {
func (x *Owner) Reset() {
*x = Owner{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Owner) String() string {
@@ -5739,8 +5785,8 @@ func (x *Owner) String() string {
func (*Owner) ProtoMessage() {}
func (x *Owner) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5752,7 +5798,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message {
// Deprecated: Use Owner.ProtoReflect.Descriptor instead.
func (*Owner) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
}
func (x *Owner) GetEntity() string {
@@ -5775,9 +5821,9 @@ type ContentRange struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The starting offset of the object data.
+ // The starting offset of the object data. This value is inclusive.
Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
- // The ending offset of the object data.
+ // The ending offset of the object data. This value is exclusive.
End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
// The complete length of the object data.
CompleteLength int64 `protobuf:"varint,3,opt,name=complete_length,json=completeLength,proto3" json:"complete_length,omitempty"`
@@ -5785,11 +5831,9 @@ type ContentRange struct {
func (x *ContentRange) Reset() {
*x = ContentRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ContentRange) String() string {
@@ -5799,8 +5843,8 @@ func (x *ContentRange) String() string {
func (*ContentRange) ProtoMessage() {}
func (x *ContentRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5812,7 +5856,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message {
// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead.
func (*ContentRange) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
}
func (x *ContentRange) GetStart() int64 {
@@ -5853,11 +5897,9 @@ type ComposeObjectRequest_SourceObject struct {
func (x *ComposeObjectRequest_SourceObject) Reset() {
*x = ComposeObjectRequest_SourceObject{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest_SourceObject) String() string {
@@ -5867,8 +5909,8 @@ func (x *ComposeObjectRequest_SourceObject) String() string {
func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5880,7 +5922,7 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message
// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0}
}
func (x *ComposeObjectRequest_SourceObject) GetName() string {
@@ -5918,11 +5960,9 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
*x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
@@ -5932,8 +5972,8 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5945,7 +5985,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p
// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0}
}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
@@ -5967,11 +6007,9 @@ type Bucket_Billing struct {
func (x *Bucket_Billing) Reset() {
*x = Bucket_Billing{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Billing) String() string {
@@ -5981,8 +6019,8 @@ func (x *Bucket_Billing) String() string {
func (*Bucket_Billing) ProtoMessage() {}
func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6033,11 +6071,9 @@ type Bucket_Cors struct {
func (x *Bucket_Cors) Reset() {
*x = Bucket_Cors{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Cors) String() string {
@@ -6047,8 +6083,8 @@ func (x *Bucket_Cors) String() string {
func (*Bucket_Cors) ProtoMessage() {}
func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6104,11 +6140,9 @@ type Bucket_Encryption struct {
func (x *Bucket_Encryption) Reset() {
*x = Bucket_Encryption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Encryption) String() string {
@@ -6118,8 +6152,8 @@ func (x *Bucket_Encryption) String() string {
func (*Bucket_Encryption) ProtoMessage() {}
func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6156,11 +6190,9 @@ type Bucket_IamConfig struct {
func (x *Bucket_IamConfig) Reset() {
*x = Bucket_IamConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_IamConfig) String() string {
@@ -6170,8 +6202,8 @@ func (x *Bucket_IamConfig) String() string {
func (*Bucket_IamConfig) ProtoMessage() {}
func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6214,11 +6246,9 @@ type Bucket_Lifecycle struct {
func (x *Bucket_Lifecycle) Reset() {
*x = Bucket_Lifecycle{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle) String() string {
@@ -6228,8 +6258,8 @@ func (x *Bucket_Lifecycle) String() string {
func (*Bucket_Lifecycle) ProtoMessage() {}
func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6266,11 +6296,9 @@ type Bucket_Logging struct {
func (x *Bucket_Logging) Reset() {
*x = Bucket_Logging{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Logging) String() string {
@@ -6280,8 +6308,8 @@ func (x *Bucket_Logging) String() string {
func (*Bucket_Logging) ProtoMessage() {}
func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6331,11 +6359,9 @@ type Bucket_RetentionPolicy struct {
func (x *Bucket_RetentionPolicy) Reset() {
*x = Bucket_RetentionPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_RetentionPolicy) String() string {
@@ -6345,8 +6371,8 @@ func (x *Bucket_RetentionPolicy) String() string {
func (*Bucket_RetentionPolicy) ProtoMessage() {}
func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6398,11 +6424,9 @@ type Bucket_SoftDeletePolicy struct {
func (x *Bucket_SoftDeletePolicy) Reset() {
*x = Bucket_SoftDeletePolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_SoftDeletePolicy) String() string {
@@ -6412,8 +6436,8 @@ func (x *Bucket_SoftDeletePolicy) String() string {
func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6456,11 +6480,9 @@ type Bucket_Versioning struct {
func (x *Bucket_Versioning) Reset() {
*x = Bucket_Versioning{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Versioning) String() string {
@@ -6470,8 +6492,8 @@ func (x *Bucket_Versioning) String() string {
func (*Bucket_Versioning) ProtoMessage() {}
func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6516,11 +6538,9 @@ type Bucket_Website struct {
func (x *Bucket_Website) Reset() {
*x = Bucket_Website{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Website) String() string {
@@ -6530,8 +6550,8 @@ func (x *Bucket_Website) String() string {
func (*Bucket_Website) ProtoMessage() {}
func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6574,11 +6594,9 @@ type Bucket_CustomPlacementConfig struct {
func (x *Bucket_CustomPlacementConfig) Reset() {
*x = Bucket_CustomPlacementConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_CustomPlacementConfig) String() string {
@@ -6588,8 +6606,8 @@ func (x *Bucket_CustomPlacementConfig) String() string {
func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6635,11 +6653,9 @@ type Bucket_Autoclass struct {
func (x *Bucket_Autoclass) Reset() {
*x = Bucket_Autoclass{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Autoclass) String() string {
@@ -6649,8 +6665,8 @@ func (x *Bucket_Autoclass) String() string {
func (*Bucket_Autoclass) ProtoMessage() {}
func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6693,6 +6709,53 @@ func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Time
return nil
}
+// Configuration for a bucket's hierarchical namespace feature.
+type Bucket_HierarchicalNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. Enables the hierarchical namespace feature.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+}
+
+func (x *Bucket_HierarchicalNamespace) Reset() {
+ *x = Bucket_HierarchicalNamespace{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Bucket_HierarchicalNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
+
+func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket_HierarchicalNamespace.ProtoReflect.Descriptor instead.
+func (*Bucket_HierarchicalNamespace) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 12}
+}
+
+func (x *Bucket_HierarchicalNamespace) GetEnabled() bool {
+ if x != nil {
+ return x.Enabled
+ }
+ return false
+}
+
// Settings for Uniform Bucket level access.
// See https://cloud.google.com/storage/docs/uniform-bucket-level-access.
type Bucket_IamConfig_UniformBucketLevelAccess struct {
@@ -6711,11 +6774,9 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct {
func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
*x = Bucket_IamConfig_UniformBucketLevelAccess{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[72]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
@@ -6725,8 +6786,8 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[72]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6770,11 +6831,9 @@ type Bucket_Lifecycle_Rule struct {
func (x *Bucket_Lifecycle_Rule) Reset() {
*x = Bucket_Lifecycle_Rule{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule) String() string {
@@ -6784,8 +6843,8 @@ func (x *Bucket_Lifecycle_Rule) String() string {
func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6830,11 +6889,9 @@ type Bucket_Lifecycle_Rule_Action struct {
func (x *Bucket_Lifecycle_Rule_Action) Reset() {
*x = Bucket_Lifecycle_Rule_Action{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule_Action) String() string {
@@ -6844,8 +6901,8 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string {
func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6928,11 +6985,9 @@ type Bucket_Lifecycle_Rule_Condition struct {
func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
*x = Bucket_Lifecycle_Rule_Condition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule_Condition) String() string {
@@ -6942,8 +6997,8 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string {
func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -7060,13 +7115,103 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+ 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42,
+ 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f,
+ 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69,
+ 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70,
+ 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d,
+ 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22,
+ 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61,
+ 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64,
+ 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74,
+ 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f,
+ 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a,
+ 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66,
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69,
0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
@@ -7074,343 +7219,157 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18,
0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41,
+ 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
+ 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e,
- 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
- 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64,
- 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
- 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
- 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c,
- 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13,
- 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74,
- 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42,
- 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64,
- 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65,
- 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b,
- 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
- 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70,
- 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b,
+ 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
+ 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64,
+ 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b,
+ 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
- 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
- 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70,
+ 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
+ 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a,
+ 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a,
0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
- 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8,
- 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
- 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
- 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04,
- 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a,
- 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02,
- 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66,
- 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b,
- 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
- 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
+ 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f,
- 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22,
- 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73,
- 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16,
+ 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e,
+ 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd3,
+ 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
- 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
- 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
+ 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52,
+ 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01,
+ 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
@@ -7418,45 +7377,101 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4,
- 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65,
+ 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52,
+ 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65,
+ 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65,
+ 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17,
0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52,
0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d,
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d,
+ 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d,
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66,
0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f,
0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
+ 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
+ 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x8e, 0x06, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73,
+ 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a,
+ 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c,
+ 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52,
+ 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d,
+ 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
@@ -7484,604 +7499,787 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
- 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33,
- 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69,
- 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
- 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc6, 0x06, 0x0a, 0x12, 0x42, 0x69, 0x64, 0x69,
+ 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11,
- 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65,
- 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26,
- 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
- 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
- 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15,
+ 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52,
+ 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x18, 0x01, 0x48, 0x04,
+ 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a,
+ 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x48,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x48, 0x06, 0x52,
+ 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01,
+ 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x42, 0x0e,
+ 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x10,
+ 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0xa7, 0x01, 0x0a, 0x15, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x72, 0x65,
+ 0x61, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x72, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3d, 0x0a, 0x0b, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a,
+ 0x72, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x16, 0x42,
+ 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69,
- 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73,
- 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12,
- 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52,
- 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69,
- 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
- 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69,
- 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x61, 0x74,
+ 0x61, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x42,
+ 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64,
+ 0x6c, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69,
+ 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x72, 0x65,
+ 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88,
+ 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xed, 0x01, 0x0a, 0x1e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x65, 0x64, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01,
+ 0x01, 0x12, 0x4a, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x01, 0x52, 0x0b, 0x77,
+ 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88,
+ 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x13, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x11, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x55, 0x0a, 0x0e, 0x52, 0x65,
+ 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x17, 0x0a, 0x07,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72,
+ 0x65, 0x61, 0x64, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x22, 0x75, 0x0a, 0x09, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x24,
+ 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66,
+ 0x66, 0x73, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a,
+ 0x72, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x65,
+ 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x49, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x0f, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x0a, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x61, 0x6e,
+ 0x67, 0x65, 0x45, 0x6e, 0x64, 0x22, 0x2d, 0x0a, 0x0e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61,
+ 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x2e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
- 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61,
- 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12,
- 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01,
- 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63,
- 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67,
- 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66,
- 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18,
- 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d,
- 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49,
- 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a,
- 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74,
- 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42,
- 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22,
- 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa,
- 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65,
- 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d,
- 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18,
- 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12,
- 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11,
- 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a,
- 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69,
- 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e,
- 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70,
- 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d,
- 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e,
- 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
- 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79,
- 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79,
- 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
+ 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72,
+ 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17,
+ 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
+ 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03,
+ 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a,
+ 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x65, 0x6e,
+ 0x64, 0x61, 0x62, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18,
+ 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, 0x70, 0x70,
+ 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
+ 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a,
+ 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12,
+ 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73,
+ 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66,
+ 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72,
+ 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65,
+ 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe9, 0x03, 0x0a,
+ 0x10, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x0c, 0x72, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x4a, 0x0a,
+ 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x03, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x8a, 0x05, 0x0a, 0x16, 0x42, 0x69, 0x64,
+ 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x53, 0x70, 0x65, 0x63, 0x12, 0x53, 0x0a, 0x12, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x10, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48,
+ 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69,
+ 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a,
0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
- 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f,
- 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69,
- 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a,
- 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74,
- 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61,
- 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12,
- 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65,
- 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04,
- 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61,
- 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
- 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72,
+ 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69,
+ 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x01, 0x52,
+ 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42,
+ 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42,
+ 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
+ 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69,
+ 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61,
+ 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16,
+ 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
+ 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69,
+ 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69,
+ 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f,
+ 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74,
+ 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c,
+ 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61,
+ 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68,
- 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
- 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
- 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d,
- 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87,
- 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33,
- 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69,
- 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73,
+ 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57,
+ 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0,
+ 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12,
+ 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61,
+ 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41,
+ 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05,
+ 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e,
+ 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61,
+ 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65,
+ 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
+ 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f,
+ 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63,
+ 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63,
+ 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36,
+ 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f,
+ 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73,
+ 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+ 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16,
+ 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e,
+ 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d,
+ 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a,
+ 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52,
+ 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32,
+ 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74,
+ 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x22, 0xec, 0x07, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x32, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x45, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48,
+ 0x00, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4c, 0x0a,
+ 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x1a, 0x69, 0x66,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4d, 0x0a, 0x1e, 0x69,
+ 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x02, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x54, 0x0a, 0x22, 0x69, 0x66,
+ 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x03, 0x52, 0x1e, 0x69,
+ 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61,
- 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66,
- 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
+ 0x12, 0x38, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x48, 0x04, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x17, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x48, 0x05, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x48, 0x06, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x07, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f,
+ 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64,
+ 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52,
+ 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42,
+ 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
+ 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64,
+ 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
+ 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
+ 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18,
- 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f,
- 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
- 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65,
- 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45,
- 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48,
- 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e,
- 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61,
- 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28,
- 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74,
- 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c,
+ 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79,
+ 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a,
+ 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
+ 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a,
+ 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74,
+ 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12,
+ 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44,
+ 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80,
+ 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43,
+ 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12,
+ 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49,
+ 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41,
+ 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54,
+ 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53,
+ 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45,
+ 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80,
+ 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f,
+ 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53,
+ 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24,
+ 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44,
+ 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42,
+ 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f,
+ 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e,
+ 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10,
+ 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43,
+ 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43,
+ 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54,
+ 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
+ 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a,
+ 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55,
+ 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02,
+ 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52,
+ 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47,
+ 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42,
+ 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e,
+ 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c,
+ 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47,
+ 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45,
+ 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a,
+ 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45,
+ 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45,
+ 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54,
+ 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44,
+ 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a,
+ 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65,
+ 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c,
0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67,
0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65,
- 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69,
- 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c,
- 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64,
- 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f,
- 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14,
- 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61,
- 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
- 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01,
- 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61,
- 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12,
+ 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70,
+ 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63,
+ 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52,
+ 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63,
+ 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63,
+ 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41,
- 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61,
- 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68,
- 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5,
- 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c,
- 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48,
- 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c,
- 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e,
- 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12,
- 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f,
- 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43,
- 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46,
- 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
- 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f,
- 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29,
- 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54,
- 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45,
- 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58,
- 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41,
- 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45,
- 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54,
- 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47,
- 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22,
- 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f,
- 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
- 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49,
- 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54,
- 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41,
- 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43,
- 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f,
- 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a,
- 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f,
- 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55,
- 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10,
- 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53,
- 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40,
- 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b,
- 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10,
- 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f,
- 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54,
- 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f,
- 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10,
- 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45,
- 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53,
- 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xd0, 0x22, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04,
- 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a,
- 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03,
- 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38,
- 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41,
- 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66,
- 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c,
- 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
- 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72,
- 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64,
- 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f,
- 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61,
- 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c,
- 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65,
- 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44,
- 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18,
- 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61,
+ 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65,
+ 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
+ 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18,
+ 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e,
- 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07,
- 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
+ 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74,
+ 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69,
+ 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67,
+ 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67,
- 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f,
- 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
- 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49,
- 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73,
- 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69,
- 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74,
- 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74,
- 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
- 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63,
- 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c,
+ 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10,
+ 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66,
+ 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73,
+ 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c,
+ 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75,
+ 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61,
+ 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68,
+ 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74,
@@ -8250,100 +8448,48 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25,
0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
- 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73,
- 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a,
- 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c,
- 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12,
- 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74,
- 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61,
- 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
- 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54,
- 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d,
- 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32,
- 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22,
- 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
- 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12,
- 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63,
- 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
- 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20,
- 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40,
- 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
- 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
- 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
- 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
- 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a,
- 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75,
- 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a,
- 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63,
+ 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d,
+ 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a,
+ 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
+ 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f,
+ 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e,
+ 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16,
+ 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
+ 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65,
+ 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
+ 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52,
+ 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12,
+ 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
+ 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07,
+ 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a,
+ 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63,
+ 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f,
+ 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48,
+ 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x71,
0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01,
@@ -8351,7 +8497,7 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73,
0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04,
+ 0x73, 0x22, 0xbd, 0x0e, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
@@ -8360,474 +8506,414 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
- 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a,
- 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12,
- 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64,
- 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64,
- 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63,
- 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c,
- 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03,
- 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63,
- 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40,
- 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
- 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65,
- 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f,
- 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
- 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07,
- 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa,
- 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a,
- 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f,
- 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d,
- 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64,
- 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78,
- 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48,
- 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c,
- 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63,
- 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a,
+ 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x23,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17,
+ 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69,
+ 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69,
+ 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68,
+ 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18,
+ 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61,
+ 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a,
+ 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12,
+ 0x44, 0x0a, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b,
- 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
- 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
- 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64,
- 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61,
- 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74,
- 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61,
- 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12,
- 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c,
- 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
- 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65,
- 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c,
- 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05,
- 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a,
- 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f,
- 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65,
- 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c,
- 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d,
- 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07,
- 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+ 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
+ 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b,
+ 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64,
+ 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72,
+ 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e,
+ 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56,
+ 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x02,
+ 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x03,
+ 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73,
+ 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, 0x66, 0x74,
+ 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, 0x0a, 0x11,
+ 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f,
+ 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09,
+ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d,
+ 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c,
+ 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13,
+ 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b,
+ 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12,
+ 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f,
+ 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xc1, 0x1e, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15,
+ 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47,
- 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74,
- 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a,
- 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93,
- 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c,
- 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3,
+ 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a,
+ 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a,
- 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
- 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74,
+ 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
+ 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
+ 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda,
+ 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19,
+ 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74,
0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d,
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
- 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61,
- 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c,
- 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12,
- 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
- 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73,
- 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
+ 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74,
+ 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54,
+ 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2a,
+ 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
- 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f,
+ 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01,
+ 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a,
- 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12,
- 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
- 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e,
- 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48,
+ 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52,
+ 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38,
+ 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
- 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
- 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
- 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65,
- 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f,
+ 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01,
+ 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
+ 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x99, 0x01, 0x0a, 0x0e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65,
+ 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64,
+ 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x0a, 0x17, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x28, 0x01, 0x30,
+ 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
+ 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01,
+ 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22,
- 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
- 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61,
- 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
- 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73,
- 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34,
+ 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
+ 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69,
0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61,
0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72,
0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73,
- 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5,
- 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a,
- 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
- 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
- 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98,
- 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65,
- 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a,
- 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d,
- 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61,
- 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74,
- 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12,
- 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x96, 0x01, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
- 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11,
- 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4,
- 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95,
- 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3,
- 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65,
- 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4,
- 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c,
- 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48,
- 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a,
- 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14,
- 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
- 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d,
- 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c,
- 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca,
- 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70,
- 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73,
- 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x47, 0xda, 0x41, 0x27, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a,
+ 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61,
+ 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64,
+ 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f,
0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
- 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66,
- 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70,
- 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79,
- 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12,
- 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
- 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f,
- 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
- 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
- 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70,
- 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
+ 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a,
+ 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -8843,8 +8929,8 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte {
}
var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78)
-var file_google_storage_v2_storage_proto_goTypes = []interface{}{
+var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 75)
+var file_google_storage_v2_storage_proto_goTypes = []any{
(ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values
(*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest
(*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest
@@ -8853,258 +8939,252 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{
(*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse
(*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest
(*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest
- (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest
- (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest
- (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest
- (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest
- (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse
- (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest
- (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest
- (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest
- (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest
- (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse
- (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest
- (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest
- (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse
- (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec
- (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest
- (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse
- (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest
- (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse
- (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest
- (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest
- (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse
- (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest
- (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse
- (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest
- (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse
- (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest
- (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest
- (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest
- (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse
- (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest
- (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest
- (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest
- (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse
- (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest
+ (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest
+ (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest
+ (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest
+ (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest
+ (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse
+ (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest
+ (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest
+ (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse
+ (*BidiReadObjectSpec)(nil), // 16: google.storage.v2.BidiReadObjectSpec
+ (*BidiReadObjectRequest)(nil), // 17: google.storage.v2.BidiReadObjectRequest
+ (*BidiReadObjectResponse)(nil), // 18: google.storage.v2.BidiReadObjectResponse
+ (*BidiReadObjectRedirectedError)(nil), // 19: google.storage.v2.BidiReadObjectRedirectedError
+ (*BidiWriteObjectRedirectedError)(nil), // 20: google.storage.v2.BidiWriteObjectRedirectedError
+ (*BidiReadObjectError)(nil), // 21: google.storage.v2.BidiReadObjectError
+ (*ReadRangeError)(nil), // 22: google.storage.v2.ReadRangeError
+ (*ReadRange)(nil), // 23: google.storage.v2.ReadRange
+ (*ObjectRangeData)(nil), // 24: google.storage.v2.ObjectRangeData
+ (*BidiReadHandle)(nil), // 25: google.storage.v2.BidiReadHandle
+ (*BidiWriteHandle)(nil), // 26: google.storage.v2.BidiWriteHandle
+ (*WriteObjectSpec)(nil), // 27: google.storage.v2.WriteObjectSpec
+ (*WriteObjectRequest)(nil), // 28: google.storage.v2.WriteObjectRequest
+ (*WriteObjectResponse)(nil), // 29: google.storage.v2.WriteObjectResponse
+ (*AppendObjectSpec)(nil), // 30: google.storage.v2.AppendObjectSpec
+ (*BidiWriteObjectRequest)(nil), // 31: google.storage.v2.BidiWriteObjectRequest
+ (*BidiWriteObjectResponse)(nil), // 32: google.storage.v2.BidiWriteObjectResponse
+ (*ListObjectsRequest)(nil), // 33: google.storage.v2.ListObjectsRequest
+ (*QueryWriteStatusRequest)(nil), // 34: google.storage.v2.QueryWriteStatusRequest
+ (*QueryWriteStatusResponse)(nil), // 35: google.storage.v2.QueryWriteStatusResponse
+ (*RewriteObjectRequest)(nil), // 36: google.storage.v2.RewriteObjectRequest
+ (*RewriteResponse)(nil), // 37: google.storage.v2.RewriteResponse
+ (*MoveObjectRequest)(nil), // 38: google.storage.v2.MoveObjectRequest
+ (*StartResumableWriteRequest)(nil), // 39: google.storage.v2.StartResumableWriteRequest
+ (*StartResumableWriteResponse)(nil), // 40: google.storage.v2.StartResumableWriteResponse
+ (*UpdateObjectRequest)(nil), // 41: google.storage.v2.UpdateObjectRequest
(*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams
(*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants
(*Bucket)(nil), // 44: google.storage.v2.Bucket
(*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl
(*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData
(*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums
- (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata
- (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig
- (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption
- (*Object)(nil), // 51: google.storage.v2.Object
- (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl
- (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse
- (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam
- (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount
- (*Owner)(nil), // 56: google.storage.v2.Owner
- (*ContentRange)(nil), // 57: google.storage.v2.ContentRange
- (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject
- (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
- (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing
- (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors
- (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption
- (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig
- (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle
- (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging
- (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy
- (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy
- (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning
- (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website
- (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig
- (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass
- nil, // 72: google.storage.v2.Bucket.LabelsEntry
- (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
- (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule
- (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action
- (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition
- nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry
- nil, // 78: google.storage.v2.Object.MetadataEntry
- (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask
- (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 81: google.protobuf.Duration
- (*date.Date)(nil), // 82: google.type.Date
- (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest
- (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest
- (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest
- (*emptypb.Empty)(nil), // 86: google.protobuf.Empty
- (*iampb.Policy)(nil), // 87: google.iam.v1.Policy
- (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse
+ (*CustomerEncryption)(nil), // 48: google.storage.v2.CustomerEncryption
+ (*Object)(nil), // 49: google.storage.v2.Object
+ (*ObjectAccessControl)(nil), // 50: google.storage.v2.ObjectAccessControl
+ (*ListObjectsResponse)(nil), // 51: google.storage.v2.ListObjectsResponse
+ (*ProjectTeam)(nil), // 52: google.storage.v2.ProjectTeam
+ (*Owner)(nil), // 53: google.storage.v2.Owner
+ (*ContentRange)(nil), // 54: google.storage.v2.ContentRange
+ (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject
+ (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
+ (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing
+ (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors
+ (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption
+ (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig
+ (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle
+ (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging
+ (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy
+ (*Bucket_SoftDeletePolicy)(nil), // 64: google.storage.v2.Bucket.SoftDeletePolicy
+ (*Bucket_Versioning)(nil), // 65: google.storage.v2.Bucket.Versioning
+ (*Bucket_Website)(nil), // 66: google.storage.v2.Bucket.Website
+ (*Bucket_CustomPlacementConfig)(nil), // 67: google.storage.v2.Bucket.CustomPlacementConfig
+ (*Bucket_Autoclass)(nil), // 68: google.storage.v2.Bucket.Autoclass
+ (*Bucket_HierarchicalNamespace)(nil), // 69: google.storage.v2.Bucket.HierarchicalNamespace
+ nil, // 70: google.storage.v2.Bucket.LabelsEntry
+ (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 71: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
+ (*Bucket_Lifecycle_Rule)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule
+ (*Bucket_Lifecycle_Rule_Action)(nil), // 73: google.storage.v2.Bucket.Lifecycle.Rule.Action
+ (*Bucket_Lifecycle_Rule_Condition)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule.Condition
+ nil, // 75: google.storage.v2.Object.MetadataEntry
+ (*fieldmaskpb.FieldMask)(nil), // 76: google.protobuf.FieldMask
+ (*status.Status)(nil), // 77: google.rpc.Status
+ (*timestamppb.Timestamp)(nil), // 78: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 79: google.protobuf.Duration
+ (*date.Date)(nil), // 80: google.type.Date
+ (*iampb.GetIamPolicyRequest)(nil), // 81: google.iam.v1.GetIamPolicyRequest
+ (*iampb.SetIamPolicyRequest)(nil), // 82: google.iam.v1.SetIamPolicyRequest
+ (*iampb.TestIamPermissionsRequest)(nil), // 83: google.iam.v1.TestIamPermissionsRequest
+ (*emptypb.Empty)(nil), // 84: google.protobuf.Empty
+ (*iampb.Policy)(nil), // 85: google.iam.v1.Policy
+ (*iampb.TestIamPermissionsResponse)(nil), // 86: google.iam.v1.TestIamPermissionsResponse
}
var file_google_storage_v2_storage_proto_depIdxs = []int32{
- 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 76, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
- 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 76, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
- 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
- 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
- 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
- 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
- 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
- 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
- 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
- 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
- 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
- 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
- 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
- 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
- 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
- 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
- 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
- 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
- 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
- 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
- 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
- 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
- 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
- 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
- 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
- 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
- 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
- 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
- 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
- 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
- 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
- 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
- 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
- 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
- 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
- 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
- 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
- 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
- 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
- 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
- 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
- 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
- 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
- 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
- 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
- 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
- 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
- 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
- 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
- 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
- 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
- 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
- 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
- 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
- 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
- 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
- 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
- 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
- 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
- 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
- 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
- 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
- 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
- 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
- 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
- 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
- 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
- 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
- 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
- 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
- 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
- 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
- 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
- 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
- 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
- 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
- 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
- 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
- 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
- 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
- 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
- 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
- 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
- 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
- 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
- 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
- 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
- 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
- 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
- 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
- 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
- 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
- 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
- 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
- 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
- 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
- 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
- 86, // 132: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
- 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
- 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
- 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
- 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
- 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
- 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
- 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
- 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
- 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
- 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
- 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
- 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
- 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
- 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
- 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
- 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
- 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
- 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
- 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
- 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
- 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
- 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
- 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
- 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
- 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
- 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
- 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 132, // [132:164] is the sub-list for method output_type
- 100, // [100:132] is the sub-list for method input_type
- 100, // [100:100] is the sub-list for extension type_name
- 100, // [100:100] is the sub-list for extension extendee
- 0, // [0:100] is the sub-list for field type_name
+ 76, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 49, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
+ 55, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
+ 42, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 42, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 42, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 42, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 46, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 54, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
+ 49, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 42, // 20: google.storage.v2.BidiReadObjectSpec.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 21: google.storage.v2.BidiReadObjectSpec.read_mask:type_name -> google.protobuf.FieldMask
+ 25, // 22: google.storage.v2.BidiReadObjectSpec.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 16, // 23: google.storage.v2.BidiReadObjectRequest.read_object_spec:type_name -> google.storage.v2.BidiReadObjectSpec
+ 23, // 24: google.storage.v2.BidiReadObjectRequest.read_ranges:type_name -> google.storage.v2.ReadRange
+ 24, // 25: google.storage.v2.BidiReadObjectResponse.object_data_ranges:type_name -> google.storage.v2.ObjectRangeData
+ 49, // 26: google.storage.v2.BidiReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 25, // 27: google.storage.v2.BidiReadObjectResponse.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 25, // 28: google.storage.v2.BidiReadObjectRedirectedError.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 26, // 29: google.storage.v2.BidiWriteObjectRedirectedError.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 22, // 30: google.storage.v2.BidiReadObjectError.read_range_errors:type_name -> google.storage.v2.ReadRangeError
+ 77, // 31: google.storage.v2.ReadRangeError.status:type_name -> google.rpc.Status
+ 46, // 32: google.storage.v2.ObjectRangeData.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 23, // 33: google.storage.v2.ObjectRangeData.read_range:type_name -> google.storage.v2.ReadRange
+ 49, // 34: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
+ 27, // 35: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 46, // 36: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 37: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 38: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 39: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 26, // 40: google.storage.v2.AppendObjectSpec.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 27, // 41: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 30, // 42: google.storage.v2.BidiWriteObjectRequest.append_object_spec:type_name -> google.storage.v2.AppendObjectSpec
+ 46, // 43: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 44: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 45: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 46: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 26, // 47: google.storage.v2.BidiWriteObjectResponse.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 76, // 48: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 42, // 49: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 50: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
+ 49, // 51: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
+ 42, // 52: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 53: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 49, // 54: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
+ 27, // 55: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 42, // 56: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 57: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 49, // 58: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
+ 76, // 59: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 42, // 60: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 45, // 61: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
+ 50, // 62: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
+ 61, // 63: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
+ 78, // 64: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
+ 58, // 65: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
+ 78, // 66: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
+ 70, // 67: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
+ 66, // 68: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
+ 65, // 69: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
+ 62, // 70: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
+ 53, // 71: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
+ 59, // 72: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
+ 57, // 73: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
+ 63, // 74: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
+ 60, // 75: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
+ 67, // 76: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
+ 68, // 77: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
+ 69, // 78: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
+ 64, // 79: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
+ 52, // 80: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 50, // 81: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
+ 78, // 82: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
+ 78, // 83: google.storage.v2.Object.finalize_time:type_name -> google.protobuf.Timestamp
+ 78, // 84: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
+ 47, // 85: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
+ 78, // 86: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
+ 78, // 87: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
+ 78, // 88: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
+ 75, // 89: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
+ 53, // 90: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
+ 48, // 91: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
+ 78, // 92: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
+ 78, // 93: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
+ 78, // 94: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
+ 52, // 95: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 49, // 96: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 56, // 97: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
+ 71, // 98: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
+ 72, // 99: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
+ 78, // 100: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
+ 79, // 101: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
+ 79, // 102: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
+ 78, // 103: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
+ 78, // 104: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
+ 78, // 105: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
+ 78, // 106: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
+ 73, // 107: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
+ 74, // 108: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
+ 80, // 109: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
+ 80, // 110: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
+ 80, // 111: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
+ 1, // 112: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
+ 2, // 113: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
+ 3, // 114: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
+ 4, // 115: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
+ 6, // 116: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
+ 81, // 117: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
+ 82, // 118: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
+ 83, // 119: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
+ 7, // 120: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
+ 8, // 121: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
+ 9, // 122: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
+ 10, // 123: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
+ 11, // 124: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
+ 14, // 125: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
+ 13, // 126: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
+ 17, // 127: google.storage.v2.Storage.BidiReadObject:input_type -> google.storage.v2.BidiReadObjectRequest
+ 41, // 128: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
+ 28, // 129: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
+ 31, // 130: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
+ 33, // 131: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
+ 36, // 132: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
+ 39, // 133: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
+ 34, // 134: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
+ 38, // 135: google.storage.v2.Storage.MoveObject:input_type -> google.storage.v2.MoveObjectRequest
+ 84, // 136: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
+ 44, // 137: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
+ 44, // 138: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
+ 5, // 139: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
+ 44, // 140: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
+ 85, // 141: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
+ 85, // 142: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
+ 86, // 143: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
+ 44, // 144: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
+ 49, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
+ 84, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
+ 49, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
+ 12, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
+ 49, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
+ 15, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
+ 18, // 151: google.storage.v2.Storage.BidiReadObject:output_type -> google.storage.v2.BidiReadObjectResponse
+ 49, // 152: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
+ 29, // 153: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
+ 32, // 154: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
+ 51, // 155: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
+ 37, // 156: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
+ 40, // 157: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
+ 35, // 158: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
+ 49, // 159: google.storage.v2.Storage.MoveObject:output_type -> google.storage.v2.Object
+ 136, // [136:160] is the sub-list for method output_type
+ 112, // [112:136] is the sub-list for method input_type
+ 112, // [112:112] is the sub-list for extension type_name
+ 112, // [112:112] is the sub-list for extension extendee
+ 0, // [0:112] is the sub-list for field type_name
}
func init() { file_google_storage_v2_storage_proto_init() }
@@ -9112,957 +9192,61 @@ func file_google_storage_v2_storage_proto_init() {
if File_google_storage_v2_storage_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListBucketsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListBucketsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LockBucketRetentionPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListNotificationConfigsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListNotificationConfigsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelResumableWriteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelResumableWriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BidiWriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BidiWriteObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListObjectsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryWriteStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryWriteStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RewriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RewriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartResumableWriteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartResumableWriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetServiceAccountRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateHmacKeyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListHmacKeysRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListHmacKeysResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CommonObjectRequestParams); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceConstants); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BucketAccessControl); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ChecksummedData); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ObjectChecksums); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HmacKeyMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NotificationConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CustomerEncryption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Object); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ObjectAccessControl); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListObjectsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProjectTeam); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceAccount); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Owner); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ContentRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest_SourceObject); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Billing); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Cors); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Encryption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_IamConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Logging); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_RetentionPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_SoftDeletePolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Versioning); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Website); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_CustomPlacementConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Autoclass); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule_Action); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule_Condition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[26].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{
(*WriteObjectRequest_UploadId)(nil),
(*WriteObjectRequest_WriteObjectSpec)(nil),
(*WriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{
(*WriteObjectResponse_PersistedSize)(nil),
(*WriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[30].OneofWrappers = []any{
(*BidiWriteObjectRequest_UploadId)(nil),
(*BidiWriteObjectRequest_WriteObjectSpec)(nil),
+ (*BidiWriteObjectRequest_AppendObjectSpec)(nil),
(*BidiWriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[31].OneofWrappers = []any{
(*BidiWriteObjectResponse_PersistedSize)(nil),
(*BidiWriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[34].OneofWrappers = []any{
(*QueryWriteStatusResponse_PersistedSize)(nil),
(*QueryWriteStatusResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{}
+ file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[37].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[40].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[48].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[63].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[67].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[73].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_storage_v2_storage_proto_rawDesc,
NumEnums: 1,
- NumMessages: 78,
+ NumMessages: 75,
NumExtensions: 0,
NumServices: 1,
},
@@ -10099,44 +9283,47 @@ type StorageClient interface {
ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error)
// Locks retention policy on a bucket.
LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error)
- // Gets the IAM policy for a specified bucket or object.
+ // Gets the IAM policy for a specified bucket.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}`.
GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Updates an IAM policy for the specified bucket or object.
+ // Updates an IAM policy for the specified bucket.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}`.
SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
- // Deletes an object and its metadata.
+ // Deletes an object and its metadata. Deletions are permanent if versioning
+ // is not enabled for the bucket, or if the generation parameter is used, or
+ // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not
+ // enabled for the bucket.
+ // When this API is used to delete an object from a bucket that has soft
+ // delete policy enabled, the object becomes soft deleted, and the
+ // `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+ // This API cannot be used to permanently delete soft-deleted objects.
+ // Soft-deleted objects are permanently deleted according to their
+ // `hardDeleteTime`.
//
- // Deletions are normally permanent when versioning is disabled or whenever
- // the generation parameter is used. However, if soft delete is enabled for
- // the bucket, deleted objects can be restored using RestoreObject until the
- // soft delete retention period has passed.
+ // You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+ // API to restore soft-deleted objects until the soft delete retention period
+ // has passed.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.delete`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Restores a soft-deleted object.
RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10149,10 +9336,43 @@ type StorageClient interface {
// they could either complete before the cancellation or fail if the
// cancellation completes first.
CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error)
- // Retrieves an object's metadata.
+ // Retrieves object metadata.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket. To return object ACLs, the authenticated user must also have
+ // the `storage.objects.getIamPolicy` permission.
GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error)
- // Reads an object's data.
+ // Retrieves object data.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error)
+ // Reads an object's data.
+ //
+ // This is a bi-directional API with the added support for reading multiple
+ // ranges within one stream both within and across multiple messages.
+ // If the server encountered an error for any of the inputs, the stream will
+ // be closed with the relevant error code.
+ // Because the API allows for multiple outstanding requests, when the stream
+ // is closed the error response will contain a BidiReadObjectRangesError proto
+ // in the error extension describing the error for each outstanding read_id.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ //
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
+ //
+ // This API is currently in preview and is not yet available for general
+ // use.
+ BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error)
// Updates an object's metadata.
// Equivalent to JSON API's storage.objects.patch.
UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10209,12 +9429,18 @@ type StorageClient interface {
// whether the service views the object as complete.
//
// Attempting to resume an already finalized object will result in an OK
- // status, with a WriteObjectResponse containing the finalized object's
+ // status, with a `WriteObjectResponse` containing the finalized object's
// metadata.
//
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error)
// Stores a new object and metadata.
//
@@ -10233,40 +9459,51 @@ type StorageClient interface {
// always be sent to the client, regardless of the value of `state_lookup`.
BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error)
// Retrieves a list of objects matching the criteria.
+ //
+ // **IAM Permissions**:
+ //
+ // The authenticated user requires `storage.objects.list`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions)
+ // to use this method. To return object ACLs, the authenticated user must also
+ // have the `storage.objects.getIamPolicy` permission.
ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error)
// Rewrites a source object to a destination object. Optionally overrides
// metadata.
RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error)
- // Starts a resumable write. How long the write operation remains valid, and
- // what happens when the write operation becomes invalid, are
- // service-dependent.
+ // Starts a resumable write operation. This
+ // method is part of the [Resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // This allows you to upload large objects in multiple chunks, which is more
+ // resilient to network interruptions than a single upload. The validity
+ // duration of the write operation, and the consequences of it becoming
+ // invalid, are service-dependent.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error)
- // Determines the `persisted_size` for an object that is being written, which
- // can then be used as the `write_offset` for the next `Write()` call.
+ // Determines the `persisted_size` of an object that is being written. This
+ // method is part of the [resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // The returned value is the size of the object that has been persisted so
+ // far. The value can be used as the `write_offset` for the next `Write()`
+ // call.
//
- // If the object does not exist (i.e., the object has been deleted, or the
- // first `Write()` has not yet reached the service), this method returns the
+ // If the object does not exist, meaning if it was deleted, or the
+ // first `Write()` has not yet reached the service, this method returns the
// error `NOT_FOUND`.
//
- // The client **may** call `QueryWriteStatus()` at any time to determine how
- // much data has been processed for this object. This is useful if the
- // client is buffering data and needs to know which data can be safely
- // evicted. For any sequence of `QueryWriteStatus()` calls for a given
- // object name, the sequence of returned `persisted_size` values will be
+ // This method is useful for clients that buffer data and need to know which
+ // data can be safely evicted. The client can call `QueryWriteStatus()` at any
+ // time to determine how much data has been logged for this object.
+ // For any sequence of `QueryWriteStatus()` calls for a given
+ // object name, the sequence of returned `persisted_size` values are
// non-decreasing.
QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error)
- // Retrieves the name of a project's Google Cloud Storage service account.
- GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error)
- // Creates a new HMAC key for the given service account.
- CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
- // Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Gets an existing HMAC key metadata for the given id.
- GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
- // Lists HMAC keys under a given project with the additional filters provided.
- ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error)
- // Updates a given HMAC key state between ACTIVE and INACTIVE.
- UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Moves the source object to the destination object in the same bucket.
+ MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error)
}
type storageClient struct {
@@ -10306,88 +9543,52 @@ func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketReques
func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) {
out := new(ListBucketsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) {
- out := new(Bucket)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- out := new(iampb.TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
+func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) {
out := new(Bucket)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
+func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
+func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
+func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ out := new(iampb.TestIamPermissionsResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
- out := new(ListNotificationConfigsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
+func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
+ out := new(Bucket)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10471,6 +9672,37 @@ func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) {
return m, nil
}
+func (c *storageClient) BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/BidiReadObject", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &storageBidiReadObjectClient{stream}
+ return x, nil
+}
+
+type Storage_BidiReadObjectClient interface {
+ Send(*BidiReadObjectRequest) error
+ Recv() (*BidiReadObjectResponse, error)
+ grpc.ClientStream
+}
+
+type storageBidiReadObjectClient struct {
+ grpc.ClientStream
+}
+
+func (x *storageBidiReadObjectClient) Send(m *BidiReadObjectRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *storageBidiReadObjectClient) Recv() (*BidiReadObjectResponse, error) {
+ m := new(BidiReadObjectResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) {
out := new(Object)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...)
@@ -10481,7 +9713,7 @@ func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectReques
}
func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/WriteObject", opts...)
if err != nil {
return nil, err
}
@@ -10515,7 +9747,7 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error)
}
func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[3], "/google.storage.v2.Storage/BidiWriteObject", opts...)
if err != nil {
return nil, err
}
@@ -10581,54 +9813,9 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat
return out, nil
}
-func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
- out := new(ServiceAccount)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
- out := new(CreateHmacKeyResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
- out := new(ListHmacKeysResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
+func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) {
+ out := new(Object)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/MoveObject", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10647,44 +9834,47 @@ type StorageServer interface {
ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error)
// Locks retention policy on a bucket.
LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error)
- // Gets the IAM policy for a specified bucket or object.
+ // Gets the IAM policy for a specified bucket.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}`.
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
- // Updates an IAM policy for the specified bucket or object.
+ // Updates an IAM policy for the specified bucket.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}`.
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error)
- // Deletes an object and its metadata.
+ // Deletes an object and its metadata. Deletions are permanent if versioning
+ // is not enabled for the bucket, or if the generation parameter is used, or
+ // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not
+ // enabled for the bucket.
+ // When this API is used to delete an object from a bucket that has soft
+ // delete policy enabled, the object becomes soft deleted, and the
+ // `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+ // This API cannot be used to permanently delete soft-deleted objects.
+ // Soft-deleted objects are permanently deleted according to their
+ // `hardDeleteTime`.
+ //
+ // You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+ // API to restore soft-deleted objects until the soft delete retention period
+ // has passed.
//
- // Deletions are normally permanent when versioning is disabled or whenever
- // the generation parameter is used. However, if soft delete is enabled for
- // the bucket, deleted objects can be restored using RestoreObject until the
- // soft delete retention period has passed.
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.delete`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error)
// Restores a soft-deleted object.
RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error)
@@ -10697,10 +9887,43 @@ type StorageServer interface {
// they could either complete before the cancellation or fail if the
// cancellation completes first.
CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error)
- // Retrieves an object's metadata.
+ // Retrieves object metadata.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket. To return object ACLs, the authenticated user must also have
+ // the `storage.objects.getIamPolicy` permission.
GetObject(context.Context, *GetObjectRequest) (*Object, error)
- // Reads an object's data.
+ // Retrieves object data.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error
+ // Reads an object's data.
+ //
+ // This is a bi-directional API with the added support for reading multiple
+ // ranges within one stream both within and across multiple messages.
+ // If the server encountered an error for any of the inputs, the stream will
+ // be closed with the relevant error code.
+ // Because the API allows for multiple outstanding requests, when the stream
+ // is closed the error response will contain a BidiReadObjectRangesError proto
+ // in the error extension describing the error for each outstanding read_id.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ //
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
+ //
+ // This API is currently in preview and is not yet available for general
+ // use.
+ BidiReadObject(Storage_BidiReadObjectServer) error
// Updates an object's metadata.
// Equivalent to JSON API's storage.objects.patch.
UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error)
@@ -10757,12 +9980,18 @@ type StorageServer interface {
// whether the service views the object as complete.
//
// Attempting to resume an already finalized object will result in an OK
- // status, with a WriteObjectResponse containing the finalized object's
+ // status, with a `WriteObjectResponse` containing the finalized object's
// metadata.
//
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
WriteObject(Storage_WriteObjectServer) error
// Stores a new object and metadata.
//
@@ -10781,40 +10010,51 @@ type StorageServer interface {
// always be sent to the client, regardless of the value of `state_lookup`.
BidiWriteObject(Storage_BidiWriteObjectServer) error
// Retrieves a list of objects matching the criteria.
+ //
+ // **IAM Permissions**:
+ //
+ // The authenticated user requires `storage.objects.list`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions)
+ // to use this method. To return object ACLs, the authenticated user must also
+ // have the `storage.objects.getIamPolicy` permission.
ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error)
// Rewrites a source object to a destination object. Optionally overrides
// metadata.
RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error)
- // Starts a resumable write. How long the write operation remains valid, and
- // what happens when the write operation becomes invalid, are
- // service-dependent.
+ // Starts a resumable write operation. This
+ // method is part of the [Resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // This allows you to upload large objects in multiple chunks, which is more
+ // resilient to network interruptions than a single upload. The validity
+ // duration of the write operation, and the consequences of it becoming
+ // invalid, are service-dependent.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error)
- // Determines the `persisted_size` for an object that is being written, which
- // can then be used as the `write_offset` for the next `Write()` call.
+ // Determines the `persisted_size` of an object that is being written. This
+ // method is part of the [resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // The returned value is the size of the object that has been persisted so
+ // far. The value can be used as the `write_offset` for the next `Write()`
+ // call.
//
- // If the object does not exist (i.e., the object has been deleted, or the
- // first `Write()` has not yet reached the service), this method returns the
+ // If the object does not exist, meaning if it was deleted, or the
+ // first `Write()` has not yet reached the service, this method returns the
// error `NOT_FOUND`.
//
- // The client **may** call `QueryWriteStatus()` at any time to determine how
- // much data has been processed for this object. This is useful if the
- // client is buffering data and needs to know which data can be safely
- // evicted. For any sequence of `QueryWriteStatus()` calls for a given
- // object name, the sequence of returned `persisted_size` values will be
+ // This method is useful for clients that buffer data and need to know which
+ // data can be safely evicted. The client can call `QueryWriteStatus()` at any
+ // time to determine how much data has been logged for this object.
+ // For any sequence of `QueryWriteStatus()` calls for a given
+ // object name, the sequence of returned `persisted_size` values are
// non-decreasing.
QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error)
- // Retrieves the name of a project's Google Cloud Storage service account.
- GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error)
- // Creates a new HMAC key for the given service account.
- CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error)
- // Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error)
- // Gets an existing HMAC key metadata for the given id.
- GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error)
- // Lists HMAC keys under a given project with the additional filters provided.
- ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error)
- // Updates a given HMAC key state between ACTIVE and INACTIVE.
- UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Moves the source object to the destination object in the same bucket.
+ MoveObject(context.Context, *MoveObjectRequest) (*Object, error)
}
// UnimplementedStorageServer can be embedded to have forward compatible implementations.
@@ -10822,100 +10062,76 @@ type UnimplementedStorageServer struct {
}
func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteBucket not implemented")
}
func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetBucket not implemented")
}
func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateBucket not implemented")
}
func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ListBuckets not implemented")
}
func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented")
}
func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
}
func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
}
func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
}
func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
-}
-func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
}
func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
}
func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteObject not implemented")
}
func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method RestoreObject not implemented")
}
func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented")
}
func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetObject not implemented")
}
func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method ReadObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method ReadObject not implemented")
+}
+func (*UnimplementedStorageServer) BidiReadObject(Storage_BidiReadObjectServer) error {
+ return status1.Errorf(codes.Unimplemented, "method BidiReadObject not implemented")
}
func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method UpdateObject not implemented")
}
func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method WriteObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method WriteObject not implemented")
}
func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented")
}
func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ListObjects not implemented")
}
func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method RewriteObject not implemented")
}
func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented")
}
func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented")
-}
-func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented")
}
-func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented")
-}
-func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented")
+func (*UnimplementedStorageServer) MoveObject(context.Context, *MoveObjectRequest) (*Object, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method MoveObject not implemented")
}
func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
@@ -11084,78 +10300,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun
return interceptor(ctx, in, info, handler)
}
-func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListNotificationConfigsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListNotificationConfigs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ComposeObjectRequest)
if err := dec(in); err != nil {
@@ -11267,6 +10411,32 @@ func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error {
return x.ServerStream.SendMsg(m)
}
+func _Storage_BidiReadObject_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(StorageServer).BidiReadObject(&storageBidiReadObjectServer{stream})
+}
+
+type Storage_BidiReadObjectServer interface {
+ Send(*BidiReadObjectResponse) error
+ Recv() (*BidiReadObjectRequest, error)
+ grpc.ServerStream
+}
+
+type storageBidiReadObjectServer struct {
+ grpc.ServerStream
+}
+
+func (x *storageBidiReadObjectServer) Send(m *BidiReadObjectResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *storageBidiReadObjectServer) Recv() (*BidiReadObjectRequest, error) {
+ m := new(BidiReadObjectRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateObjectRequest)
if err := dec(in); err != nil {
@@ -11409,110 +10579,20 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
-func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetServiceAccountRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetServiceAccount(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetServiceAccount",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListHmacKeysRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListHmacKeys(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListHmacKeys",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateHmacKeyRequest)
+func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveObjectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(StorageServer).UpdateHmacKey(ctx, in)
+ return srv.(StorageServer).MoveObject(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/google.storage.v2.Storage/UpdateHmacKey",
+ FullMethod: "/google.storage.v2.Storage/MoveObject",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest))
+ return srv.(StorageServer).MoveObject(ctx, req.(*MoveObjectRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -11557,22 +10637,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateBucket",
Handler: _Storage_UpdateBucket_Handler,
},
- {
- MethodName: "DeleteNotificationConfig",
- Handler: _Storage_DeleteNotificationConfig_Handler,
- },
- {
- MethodName: "GetNotificationConfig",
- Handler: _Storage_GetNotificationConfig_Handler,
- },
- {
- MethodName: "CreateNotificationConfig",
- Handler: _Storage_CreateNotificationConfig_Handler,
- },
- {
- MethodName: "ListNotificationConfigs",
- Handler: _Storage_ListNotificationConfigs_Handler,
- },
{
MethodName: "ComposeObject",
Handler: _Storage_ComposeObject_Handler,
@@ -11614,28 +10678,8 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
Handler: _Storage_QueryWriteStatus_Handler,
},
{
- MethodName: "GetServiceAccount",
- Handler: _Storage_GetServiceAccount_Handler,
- },
- {
- MethodName: "CreateHmacKey",
- Handler: _Storage_CreateHmacKey_Handler,
- },
- {
- MethodName: "DeleteHmacKey",
- Handler: _Storage_DeleteHmacKey_Handler,
- },
- {
- MethodName: "GetHmacKey",
- Handler: _Storage_GetHmacKey_Handler,
- },
- {
- MethodName: "ListHmacKeys",
- Handler: _Storage_ListHmacKeys_Handler,
- },
- {
- MethodName: "UpdateHmacKey",
- Handler: _Storage_UpdateHmacKey_Handler,
+ MethodName: "MoveObject",
+ Handler: _Storage_MoveObject_Handler,
},
},
Streams: []grpc.StreamDesc{
@@ -11644,6 +10688,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
Handler: _Storage_ReadObject_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "BidiReadObject",
+ Handler: _Storage_BidiReadObject_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
{
StreamName: "WriteObject",
Handler: _Storage_WriteObject_Handler,
diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go
new file mode 100644
index 000000000..2fd5111fb
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/experimental.go
@@ -0,0 +1,36 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// All options in this package are experimental.
+
+package internal
+
+var (
+ // WithMetricInterval is a function which is implemented by storage package.
+ // It sets how often to emit metrics when using NewPeriodicReader and must be
+ // greater than 1 minute.
+ WithMetricInterval any // func (*time.Duration) option.ClientOption
+
+ // WithMetricExporter is a function which is implemented by storage package.
+ // Set an alternate client-side metric Exporter to emit metrics through.
+ WithMetricExporter any // func (*metric.Exporter) option.ClientOption
+
+ // WithReadStallTimeout is a function which is implemented by storage package.
+ // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption.
+ WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption
+
+ // WithGRPCBidiReads is a function which is implemented by the storage package.
+ // It sets the gRPC client to use the BidiReadObject API for downloads.
+ WithGRPCBidiReads any // func() option.ClientOption
+)
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index 2d5cf890e..6e2e4fcde 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.36.0"
+const Version = "1.51.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index dc79fd88b..34b676c5f 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -21,7 +21,10 @@ import (
"io"
"net"
"net/url"
+ "os"
"strings"
+ "sync"
+ "time"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
@@ -38,8 +41,16 @@ var defaultRetry *retryConfig = &retryConfig{}
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)
const (
- xGoogHeaderKey = "x-goog-api-client"
- idempotencyHeaderKey = "x-goog-gcs-idempotency-token"
+ xGoogHeaderKey = "x-goog-api-client"
+ idempotencyHeaderKey = "x-goog-gcs-idempotency-token"
+ cookieHeaderKey = "cookie"
+ directpathCookieHeaderKey = "x-directpath-tracing-cookie"
+)
+
+var (
+ cookieHeader = sync.OnceValue(func() string {
+ return os.Getenv("GOOGLE_SDK_GO_TRACING_COOKIE")
+ })
)
// run determines whether a retry is necessary based on the config and
@@ -67,11 +78,40 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
errorFunc = retry.shouldRetry
}
+ var quitAfterTimer *time.Timer
+ if retry.maxRetryDuration != 0 {
+ quitAfterTimer = time.NewTimer(retry.maxRetryDuration)
+ defer quitAfterTimer.Stop()
+ }
+
+ var lastErr error
return internal.Retry(ctx, bo, func() (stop bool, err error) {
+ if retry.maxRetryDuration != 0 {
+ select {
+ case <-quitAfterTimer.C:
+ if lastErr == nil {
+ return true, fmt.Errorf("storage: request not sent, choose a larger value for the retry deadline (currently set to %s)", retry.maxRetryDuration)
+ }
+ return true, fmt.Errorf("storage: retry deadline of %s reached after %v attempts; last error: %w", retry.maxRetryDuration, attempts, lastErr)
+ default:
+ }
+ }
+
ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts)
- err = call(ctxWithHeaders)
+ lastErr = call(ctxWithHeaders)
+ if lastErr != nil && retry.maxAttempts != nil && attempts >= *retry.maxAttempts {
+ return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, lastErr)
+ }
attempts++
- return !errorFunc(err), err
+ retryable := errorFunc(lastErr)
+ // Explicitly check context cancellation so that we can distinguish between a
+ // DEADLINE_EXCEEDED error from the server and a user-set context deadline.
+ // Unfortunately gRPC will codes.DeadlineExceeded (which may be retryable if it's
+ // sent by the server) in both cases.
+ if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
+ retryable = false
+ }
+ return !retryable, lastErr
})
}
@@ -83,6 +123,12 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
+
+ if c := cookieHeader(); c != "" {
+ ctx = callctx.SetHeaders(ctx, cookieHeaderKey, c)
+ ctx = callctx.SetHeaders(ctx, directpathCookieHeaderKey, c)
+ }
+
return ctx
}
@@ -102,35 +148,37 @@ func ShouldRetry(err error) bool {
if errors.Is(err, io.ErrUnexpectedEOF) {
return true
}
+ if errors.Is(err, net.ErrClosed) {
+ return true
+ }
switch e := err.(type) {
- case *net.OpError:
- if strings.Contains(e.Error(), "use of closed network connection") {
- // TODO: check against net.ErrClosed (go 1.16+) instead of string
- return true
- }
case *googleapi.Error:
// Retry on 408, 429, and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
- case *url.Error:
+ case *net.OpError, *url.Error:
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
// Unfortunately the error type is unexported, so we resort to string
// matching.
- retriable := []string{"connection refused", "connection reset"}
+ retriable := []string{"connection refused", "connection reset", "broken pipe"}
for _, s := range retriable {
if strings.Contains(e.Error(), s) {
return true
}
}
+ case *net.DNSError:
+ if e.IsTemporary {
+ return true
+ }
case interface{ Temporary() bool }:
if e.Temporary() {
return true
}
}
- // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC.
+ // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC.
if st, ok := status.FromError(err); ok {
- if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal {
+ if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded {
return true
}
}
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
index 56f3e3daa..3b1df8afb 100644
--- a/vendor/cloud.google.com/go/storage/notifications.go
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -20,8 +20,6 @@ import (
"fmt"
"regexp"
- "cloud.google.com/go/internal/trace"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
@@ -92,31 +90,7 @@ func toNotification(rn *raw.Notification) *Notification {
return n
}
-func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
- n := &Notification{
- ID: pbn.GetName(),
- EventTypes: pbn.GetEventTypes(),
- ObjectNamePrefix: pbn.GetObjectNamePrefix(),
- CustomAttributes: pbn.GetCustomAttributes(),
- PayloadFormat: pbn.GetPayloadFormat(),
- }
- n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
- return n
-}
-
-func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
- return &storagepb.NotificationConfig{
- Name: n.ID,
- Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
- n.TopicProjectID, n.TopicID),
- EventTypes: n.EventTypes,
- ObjectNamePrefix: n.ObjectNamePrefix,
- CustomAttributes: n.CustomAttributes,
- PayloadFormat: n.PayloadFormat,
- }
-}
-
-var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
// parseNotificationTopic extracts the project and topic IDs from from the full
// resource name returned by the service. If the name is malformed, it returns
@@ -144,9 +118,10 @@ func toRawNotification(n *Notification) *raw.Notification {
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
+// Note: gRPC is not supported.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.AddNotification")
+ defer func() { endSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
@@ -165,9 +140,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.Notifications")
+ defer func() { endSpan(ctx, err) }()
opts := makeStorageOpts(true, b.retry, b.userProject)
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
@@ -182,18 +158,11 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
return m
}
-func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
- m := map[string]*Notification{}
- for _, n := range ns {
- m[n.Name] = toNotificationFromProto(n)
- }
- return m
-}
-
// DeleteNotification deletes the notification with the given ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Bucket.DeleteNotification")
+ defer func() { endSpan(ctx, err) }()
opts := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index e72ceb78f..16d57644a 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -15,15 +15,74 @@
package storage
import (
+ "os"
+ "strconv"
+ "time"
+
+ "cloud.google.com/go/storage/experimental"
+ storageinternal "cloud.google.com/go/storage/internal"
+ "go.opentelemetry.io/otel/sdk/metric"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
)
-// storageConfig contains the Storage client option configuration that can be
+const (
+ dynamicReadReqIncreaseRateEnv = "DYNAMIC_READ_REQ_INCREASE_RATE"
+ dynamicReadReqInitialTimeoutEnv = "DYNAMIC_READ_REQ_INITIAL_TIMEOUT"
+ defaultDynamicReadReqIncreaseRate = 15.0
+ defaultDynamicReqdReqMaxTimeout = 1 * time.Hour
+ defaultDynamicReadReqMinTimeout = 500 * time.Millisecond
+ defaultTargetPercentile = 0.99
+)
+
+func init() {
+ // initialize experimental options
+ storageinternal.WithMetricExporter = withMetricExporter
+ storageinternal.WithMetricInterval = withMetricInterval
+ storageinternal.WithReadStallTimeout = withReadStallTimeout
+ storageinternal.WithGRPCBidiReads = withGRPCBidiReads
+}
+
+// getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable.
+// It returns defaultDynamicReadReqIncreaseRate if env is not set or the set value is invalid.
+func getDynamicReadReqIncreaseRateFromEnv() float64 {
+ increaseRate := os.Getenv(dynamicReadReqIncreaseRateEnv)
+ if increaseRate == "" {
+ return defaultDynamicReadReqIncreaseRate
+ }
+
+ val, err := strconv.ParseFloat(increaseRate, 64)
+ if err != nil {
+ return defaultDynamicReadReqIncreaseRate
+ }
+ return val
+}
+
+// getDynamicReadReqInitialTimeoutSecFromEnv returns the value set in the env variable.
+// It returns the passed defaultVal if env is not set or the set value is invalid.
+func getDynamicReadReqInitialTimeoutSecFromEnv(defaultVal time.Duration) time.Duration {
+ initialTimeout := os.Getenv(dynamicReadReqInitialTimeoutEnv)
+ if initialTimeout == "" {
+ return defaultVal
+ }
+
+ val, err := time.ParseDuration(initialTimeout)
+ if err != nil {
+ return defaultVal
+ }
+ return val
+}
+
// set through storageClientOptions.
type storageConfig struct {
- useJSONforReads bool
- readAPIWasSet bool
+ useJSONforReads bool
+ readAPIWasSet bool
+ disableClientMetrics bool
+ metricExporter *metric.Exporter
+ metricInterval time.Duration
+ manualReader *metric.ManualReader
+ readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
+ grpcBidiReads bool
}
// newStorageConfig generates a new storageConfig with all the given
@@ -44,10 +103,14 @@ type storageClientOption interface {
ApplyStorageOpt(*storageConfig)
}
-// WithJSONReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the JSON API for object reads. Currently, the
-// default API used for reads is XML.
-// Setting this option is required to use the GenerationNotMatch condition.
+// WithJSONReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage JSON API for object
+// reads. Currently, the default API used for reads is XML, but JSON will
+// become the default in a future release.
+//
+// Setting this option is required to use the GenerationNotMatch condition. We
+// also recommend using JSON reads to ensure consistency with other client
+// operations (all of which use JSON by default).
//
// Note that when this option is set, reads will return a zero date for
// [ReaderObjectAttrs].LastModified and may return a different value for
@@ -56,10 +119,11 @@ func WithJSONReads() option.ClientOption {
return &withReadAPI{useJSON: true}
}
-// WithXMLReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the XML API for object reads.
+// WithXMLReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage XML API for object reads.
//
-// This is the current default.
+// This is the current default, but the default will switch to JSON in a future
+// release.
func WithXMLReads() option.ClientOption {
return &withReadAPI{useJSON: false}
}
@@ -73,3 +137,120 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
c.useJSONforReads = w.useJSON
c.readAPIWasSet = true
}
+
+type withDisabledClientMetrics struct {
+ internaloption.EmbeddableAdapter
+ disabledClientMetrics bool
+}
+
+// WithDisabledClientMetrics is an option that may be passed to [NewClient].
+// gRPC metrics are enabled by default in the GCS client and will export the
+// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+// [Google Cloud Monitoring]. The option is used to disable metrics.
+// Google Cloud Support can use this information to more quickly diagnose
+// problems related to GCS and gRPC.
+// Sending this data does not incur any billing charges, and requires minimal
+// CPU (a single RPC every few minutes) or memory (a few KiB to batch the
+// telemetry).
+//
+// The default is to enable client metrics. To opt-out of metrics collected use
+// this option.
+//
+// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
+func WithDisabledClientMetrics() option.ClientOption {
+ return &withDisabledClientMetrics{disabledClientMetrics: true}
+}
+
+func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) {
+ c.disableClientMetrics = w.disabledClientMetrics
+}
+
+type withMeterOptions struct {
+ internaloption.EmbeddableAdapter
+ // set sampling interval
+ interval time.Duration
+}
+
+func withMetricInterval(interval time.Duration) option.ClientOption {
+ return &withMeterOptions{interval: interval}
+}
+
+func (w *withMeterOptions) ApplyStorageOpt(c *storageConfig) {
+ c.metricInterval = w.interval
+}
+
+type withMetricExporterConfig struct {
+ internaloption.EmbeddableAdapter
+ // exporter override
+ metricExporter *metric.Exporter
+}
+
+func withMetricExporter(ex *metric.Exporter) option.ClientOption {
+ return &withMetricExporterConfig{metricExporter: ex}
+}
+
+func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) {
+ c.metricExporter = w.metricExporter
+}
+
+type withTestMetricReaderConfig struct {
+ internaloption.EmbeddableAdapter
+ // reader override
+ metricReader *metric.ManualReader
+}
+
+func withTestMetricReader(ex *metric.ManualReader) option.ClientOption {
+ return &withTestMetricReaderConfig{metricReader: ex}
+}
+
+func (w *withTestMetricReaderConfig) ApplyStorageOpt(c *storageConfig) {
+ c.manualReader = w.metricReader
+}
+
+// WithReadStallTimeout is an option that may be passed to [NewClient].
+// It enables the client to retry the stalled read request, happens as part of
+// storage.Reader creation. As the name suggest, timeout is adjusted dynamically
+// based on past observed read-req latencies.
+//
+// This is only supported for the read operation and that too for http(XML) client.
+// Grpc read-operation will be supported soon.
+func withReadStallTimeout(rstc *experimental.ReadStallTimeoutConfig) option.ClientOption {
+ // TODO (raj-prince): To keep separate dynamicDelay instance for different BucketHandle.
+ // Currently, dynamicTimeout is kept at the client and hence shared across all the
+ // BucketHandle, which is not the ideal state. As latency depends on location of VM
+ // and Bucket, and read latency of different buckets may lie in different range.
+ // Hence having a separate dynamicTimeout instance at BucketHandle level will
+ // be better
+ if rstc.Min == time.Duration(0) {
+ rstc.Min = defaultDynamicReadReqMinTimeout
+ }
+ if rstc.TargetPercentile == 0 {
+ rstc.TargetPercentile = defaultTargetPercentile
+ }
+ return &withReadStallTimeoutConfig{
+ readStallTimeoutConfig: rstc,
+ }
+}
+
+type withReadStallTimeoutConfig struct {
+ internaloption.EmbeddableAdapter
+ readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
+}
+
+func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) {
+ config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig
+}
+
+func withGRPCBidiReads() option.ClientOption {
+ return &withGRPCBidiReadsConfig{}
+}
+
+type withGRPCBidiReadsConfig struct {
+ internaloption.EmbeddableAdapter
+}
+
+func (w *withGRPCBidiReadsConfig) ApplyStorageOpt(config *storageConfig) {
+ config.grpcBidiReads = true
+}
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 4673a68d0..634abbeef 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -22,6 +22,7 @@ import (
"io/ioutil"
"net/http"
"strings"
+ "sync"
"time"
"cloud.google.com/go/internal/trace"
@@ -65,6 +66,19 @@ type ReaderObjectAttrs struct {
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
+
+ // CRC32C is the CRC32 checksum of the entire object's content using the
+ // Castagnoli93 polynomial, if available.
+ CRC32C uint32
+
+ // Decompressed is true if the object is stored as a gzip file and was
+ // decompressed when read.
+ // Objects are automatically decompressed if the object's metadata property
+ // "Content-Encoding" is set to "gzip" or satisfies decompressive
+ // transcoding as per https://cloud.google.com/storage/docs/transcoding.
+ //
+ // To prevent decompression on reads, use [ObjectHandle.ReadCompressed].
+ Decompressed bool
}
// NewReader creates a new Reader to read the contents of the
@@ -72,6 +86,12 @@ type ReaderObjectAttrs struct {
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
@@ -85,7 +105,14 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
-// Google Cloud Storage dictates.
+// Google Cloud Storage dictates. If decompressive transcoding occurs,
+// [Reader.Attrs.Decompressed] will be true.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
// This span covers the life of the reader. It is closed via the context
// in Reader.Close.
@@ -114,6 +141,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
encryptionKey: o.encryptionKey,
conds: o.conds,
readCompressed: o.readCompressed,
+ handle: &o.readHandle,
}
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
@@ -129,6 +157,49 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
return r, err
}
+// NewMultiRangeDownloader creates a multi-range reader for an object.
+// Must be called on a gRPC client created using [NewGRPCClient].
+//
+// This uses the gRPC-specific bi-directional read API, which is in private
+// preview; please contact your account manager if interested.
+func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiRangeDownloader, err error) {
+ // This span covers the life of the reader. It is closed via the context
+ // in Reader.Close.
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.MultiRangeDownloader")
+
+ if err := o.validate(); err != nil {
+ return nil, err
+ }
+ if o.conds != nil {
+ if err := o.conds.validate("NewMultiRangeDownloader"); err != nil {
+ return nil, err
+ }
+ }
+
+ opts := makeStorageOpts(true, o.retry, o.userProject)
+
+ params := &newMultiRangeDownloaderParams{
+ bucket: o.bucket,
+ conds: o.conds,
+ encryptionKey: o.encryptionKey,
+ gen: o.gen,
+ object: o.object,
+ handle: &o.readHandle,
+ }
+
+ r, err := o.c.tc.NewMultiRangeDownloader(ctx, params, opts...)
+
+ // Pass the context so that the span can be closed in MultiRangeDownloader.Close(), or close the
+ // span now if there is an error.
+ if err == nil {
+ r.ctx = ctx
+ } else {
+ trace.EndSpan(ctx, err)
+ }
+
+ return r, err
+}
+
// decompressiveTranscoding returns true if the request was served decompressed
// and different than its original storage form. This happens when the "Content-Encoding"
// header is "gzip".
@@ -196,14 +267,16 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
- Attrs ReaderObjectAttrs
+ Attrs ReaderObjectAttrs
+ objectMetadata *map[string]string
+
seen, remain, size int64
- checkCRC bool // should we check the CRC?
- wantCRC uint32 // the CRC32c value the server sent in the header
- gotCRC uint32 // running crc
+ checkCRC bool // Did we check the CRC? This is now only used by tests.
reader io.ReadCloser
ctx context.Context
+ mu sync.Mutex
+ handle *ReadHandle
}
// Close closes the Reader. It must be called when done reading.
@@ -218,17 +291,17 @@ func (r *Reader) Read(p []byte) (int, error) {
if r.remain != -1 {
r.remain -= int64(n)
}
- if r.checkCRC {
- r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
- // Check CRC here. It would be natural to check it in Close, but
- // everybody defers Close on the assumption that it doesn't return
- // anything worth looking at.
- if err == io.EOF {
- if r.gotCRC != r.wantCRC {
- return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
- r.gotCRC, r.wantCRC)
- }
- }
+ return n, err
+}
+
+// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface.
+// This is called implicitly when calling io.Copy on a Reader.
+func (r *Reader) WriteTo(w io.Writer) (int64, error) {
+ // This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an
+ // implementation of WriteTo.
+ n, err := io.Copy(w, r.reader)
+ if r.remain != -1 {
+ r.remain -= int64(n)
}
return n, err
}
@@ -274,3 +347,99 @@ func (r *Reader) CacheControl() string {
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
}
+
+// Metadata returns user-provided metadata, in key/value pairs.
+//
+// It can be nil if no metadata is present, or if the client uses the JSON
+// API for downloads. Only the XML and gRPC APIs support getting
+// custom metadata via the Reader; for JSON make a separate call to
+// ObjectHandle.Attrs.
+func (r *Reader) Metadata() map[string]string {
+ if r.objectMetadata != nil {
+ return *r.objectMetadata
+ }
+ return nil
+}
+
+// ReadHandle returns the read handle associated with an object.
+// ReadHandle will be periodically refreshed.
+//
+// ReadHandle requires the gRPC-specific bi-directional read API, which is in
+// private preview; please contact your account manager if interested.
+// Note that this only valid for gRPC and only with zonal buckets.
+func (r *Reader) ReadHandle() ReadHandle {
+ if r.handle == nil {
+ r.handle = &ReadHandle{}
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return (*r.handle)
+}
+
+// MultiRangeDownloader reads a Cloud Storage object.
+//
+// Typically, a MultiRangeDownloader opens a stream to which we can add
+// different ranges to read from the object.
+//
+// This API is currently in preview and is not yet available for general use.
+type MultiRangeDownloader struct {
+ Attrs ReaderObjectAttrs
+ reader multiRangeDownloader
+ ctx context.Context
+}
+
+type multiRangeDownloader interface {
+ add(output io.Writer, offset, limit int64, callback func(int64, int64, error))
+ wait()
+ close() error
+ getHandle() []byte
+}
+
+// Add adds a new range to MultiRangeDownloader.
+//
+// The offset for the first byte to return in the read, relative to the start
+// of the object.
+//
+// A negative offset value will be interpreted as the number of bytes from the
+// end of the object to be returned. Requesting a negative offset with magnitude
+// larger than the size of the object will return the entire object. An offset
+// larger than the size of the object will result in an OutOfRange error.
+//
+// A limit of zero indicates that there is no limit, and a negative limit will
+// cause an error.
+//
+// This will initiate the read range but is non-blocking; call callback to
+// process the result. Add is thread-safe and can be called simultaneously
+// from different goroutines.
+//
+// Callback will be called with the offset, length of data read, and error
+// of the read. Note that the length of the data read may be less than the
+// requested length if the end of the object is reached.
+func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, callback func(int64, int64, error)) {
+ mrd.reader.add(output, offset, length, callback)
+}
+
+// Close the MultiRangeDownloader. It must be called when done reading.
+// Adding new ranges after this has been called will cause an error.
+//
+// This will immediately close the stream and can result in a
+// "stream closed early" error if a response for a range is still not processed.
+// Call [MultiRangeDownloader.Wait] to avoid this error.
+func (mrd *MultiRangeDownloader) Close() error {
+ err := mrd.reader.close()
+ trace.EndSpan(mrd.ctx, err)
+ return err
+}
+
+// Wait for all the responses to process on the stream.
+// Adding new ranges after this has been called will cause an error.
+// Wait will wait for all callbacks to finish.
+func (mrd *MultiRangeDownloader) Wait() {
+ mrd.reader.wait()
+}
+
+// GetHandle returns the read handle. This can be used to further speed up the
+// follow up read if the same object is read through a different stream.
+func (mrd *MultiRangeDownloader) GetHandle() []byte {
+ return mrd.reader.getHandle()
+}
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 78ecbf0e8..c6f18c63f 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -43,6 +43,9 @@ import (
"cloud.google.com/go/storage/internal"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/googleapis/gax-go/v2"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
@@ -50,6 +53,10 @@ import (
raw "google.golang.org/api/storage/v1"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats/opentelemetry"
+ "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/fieldmaskpb"
@@ -60,15 +67,17 @@ import (
var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true}
var (
- // ErrBucketNotExist indicates that the bucket does not exist.
+ // ErrBucketNotExist indicates that the bucket does not exist. It should be
+ // checked for using [errors.Is] instead of direct equality.
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
- // ErrObjectNotExist indicates that the object does not exist.
+ // ErrObjectNotExist indicates that the object does not exist. It should be
+ // checked for using [errors.Is] instead of direct equality.
ErrObjectNotExist = errors.New("storage: object doesn't exist")
// errMethodNotSupported indicates that the method called is not currently supported by the client.
// TODO: Export this error when launching the transport-agnostic client.
errMethodNotSupported = errors.New("storage: method is not currently supported")
- // errMethodNotValid indicates that given HTTP method is not valid.
- errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
+ // errSignedURLMethodNotValid indicates that given HTTP method is not valid.
+ errSignedURLMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version)
@@ -117,10 +126,6 @@ type Client struct {
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
tc storageClient
- // useGRPC flags whether the client uses gRPC. This is needed while the
- // integration piece is only partially complete.
- // TODO: remove before merging to main.
- useGRPC bool
}
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
@@ -146,8 +151,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// Prepend default options to avoid overriding options passed by the user.
opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl, "https://www.googleapis.com/auth/cloud-platform"), option.WithUserAgent(userAgent)}, opts...)
- opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/"))
- opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"))
+ opts = append(opts, internaloption.WithDefaultEndpointTemplate("https://storage.UNIVERSE_DOMAIN/storage/v1/"),
+ internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ )
// Don't error out here. The user may have passed in their own HTTP
// client which does not auth with ADC or other common conventions.
@@ -178,12 +185,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
opts = append([]option.ClientOption{
option.WithoutAuthentication(),
internaloption.SkipDialSettingsValidation(),
- internaloption.WithDefaultEndpoint(endpoint),
+ internaloption.WithDefaultEndpointTemplate(endpoint),
internaloption.WithDefaultMTLSEndpoint(endpoint),
}, opts...)
}
- // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint.
+ // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpointTemplate, and WithDefaultMTLSEndpoint.
hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("dialing: %w", err)
@@ -216,11 +223,10 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// NewGRPCClient creates a new Storage client using the gRPC transport and API.
// Client methods which have not been implemented in gRPC will return an error.
-// In particular, methods for Cloud Pub/Sub notifications are not supported.
-//
-// The storage gRPC API is still in preview and not yet publicly available.
-// If you would like to use the API, please first contact your GCP account rep to
-// request access. The API may be subject to breaking changes.
+// In particular, methods for Cloud Pub/Sub notifications, Service Account HMAC
+// keys, and ServiceAccount are not supported.
+// Using a non-default universe domain is also not supported with the Storage
+// gRPC client.
//
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
@@ -228,13 +234,68 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// You may configure the client by passing in options from the [google.golang.org/api/option]
// package.
func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
- opts = append(defaultGRPCOptions(), opts...)
tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...))
if err != nil {
return nil, err
}
- return &Client{tc: tc, useGRPC: true}, nil
+ return &Client{tc: tc}, nil
+}
+
+// CheckDirectConnectivitySupported checks if gRPC direct connectivity
+// is available for a specific bucket from the environment where the client
+// is running. A `nil` error represents Direct Connectivity was detected.
+// Direct connectivity is expected to be available when running from inside
+// GCP and connecting to a bucket in the same region.
+//
+// Experimental helper that's subject to change.
+//
+// You can pass in [option.ClientOption] you plan on passing to [NewGRPCClient]
+func CheckDirectConnectivitySupported(ctx context.Context, bucket string, opts ...option.ClientOption) error {
+ view := metric.NewView(
+ metric.Instrument{
+ Name: "grpc.client.attempt.duration",
+ Kind: metric.InstrumentKindHistogram,
+ },
+ metric.Stream{AttributeFilter: attribute.NewAllowKeysFilter("grpc.lb.locality")},
+ )
+ mr := metric.NewManualReader()
+ provider := metric.NewMeterProvider(metric.WithReader(mr), metric.WithView(view))
+ // Provider handles shutting down ManualReader
+ defer provider.Shutdown(ctx)
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: stats.NewMetrics("grpc.client.attempt.duration"),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ combinedOpts := append(opts, WithDisabledClientMetrics(), option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})))
+ client, err := NewGRPCClient(ctx, combinedOpts...)
+ if err != nil {
+ return fmt.Errorf("storage.NewGRPCClient: %w", err)
+ }
+ defer client.Close()
+ if _, err = client.Bucket(bucket).Attrs(ctx); err != nil {
+ return fmt.Errorf("Bucket.Attrs: %w", err)
+ }
+ // Call manual reader to collect metric
+ rm := metricdata.ResourceMetrics{}
+ if err = mr.Collect(context.Background(), &rm); err != nil {
+ return fmt.Errorf("ManualReader.Collect: %w", err)
+ }
+ for _, sm := range rm.ScopeMetrics {
+ for _, m := range sm.Metrics {
+ if m.Name == "grpc.client.attempt.duration" {
+ hist := m.Data.(metricdata.Histogram[float64])
+ for _, d := range hist.DataPoints {
+ v, present := d.Attributes.Value("grpc.lb.locality")
+ if present && v.AsString() != "" && v.AsString() != "{}" {
+ return nil
+ }
+ }
+ }
+ }
+ }
+ return errors.New("storage: direct connectivity not detected")
}
// Close closes the Client.
@@ -299,7 +360,11 @@ func (s pathStyle) host(hostname, bucket string) string {
return "storage.googleapis.com"
}
-func (s virtualHostedStyle) host(_, bucket string) string {
+func (s virtualHostedStyle) host(hostname, bucket string) string {
+ if hostname != "" {
+ return bucket + "." + stripScheme(hostname)
+ }
+
if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
return bucket + "." + stripScheme(host)
}
@@ -455,7 +520,7 @@ type SignedURLOptions struct {
// Hostname sets the host of the signed URL. This field overrides any
// endpoint set on a storage Client or through STORAGE_EMULATOR_HOST.
- // Only compatible with PathStyle URLStyle.
+ // Only compatible with PathStyle and VirtualHostedStyle URLStyles.
// Optional.
Hostname string
}
@@ -628,7 +693,7 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error {
}
opts.Method = strings.ToUpper(opts.Method)
if _, ok := signedURLMethods[opts.Method]; !ok {
- return errMethodNotValid
+ return errSignedURLMethodNotValid
}
if opts.Expires.IsZero() {
return errors.New("storage: missing required expires option")
@@ -743,7 +808,7 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st
}
var headersWithValue []string
- headersWithValue = append(headersWithValue, "host:"+u.Host)
+ headersWithValue = append(headersWithValue, "host:"+u.Hostname())
headersWithValue = append(headersWithValue, opts.Headers...)
if opts.ContentType != "" {
headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType)
@@ -876,6 +941,9 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
return u.String(), nil
}
+// ReadHandle associated with the object. This is periodically refreshed.
+type ReadHandle []byte
+
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
@@ -890,6 +958,24 @@ type ObjectHandle struct {
readCompressed bool // Accept-Encoding: gzip
retry *retryConfig
overrideRetention *bool
+ softDeleted bool
+ readHandle ReadHandle
+}
+
+// ReadHandle returns a new ObjectHandle that uses the ReadHandle to open the objects.
+//
+// Objects that have already been opened can be opened an additional time,
+// using a read handle returned in the response, at lower latency.
+// This produces the exact same object and generation and does not check if
+// the generation is still the newest one.
+// Note that this will be a noop unless it's set on a gRPC client on buckets with
+// bi-directional read API access.
+// Also note that you can get a ReadHandle only via calling reader.ReadHandle() on a
+// previous read of the same object.
+func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle {
+ o2 := *o
+ o2.readHandle = r
+ return &o2
}
// ACL provides access to the object's access control list.
@@ -937,22 +1023,22 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Object.Attrs")
+ defer func() { endSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
opts := makeStorageOpts(true, o.retry, o.userProject)
- return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...)
+ return o.c.tc.GetObject(ctx, &getObjectParams{o.bucket, o.object, o.gen, o.encryptionKey, o.conds, o.softDeleted}, opts...)
}
// Update updates an object with the provided attributes. See
// ObjectAttrsToUpdate docs for details on treatment of zero values.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
- defer func() { trace.EndSpan(ctx, err) }()
+ ctx, _ = startSpan(ctx, "Object.Update")
+ defer func() { endSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
@@ -967,7 +1053,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
gen: o.gen,
encryptionKey: o.encryptionKey,
conds: o.conds,
- overrideRetention: o.overrideRetention}, opts...)
+ overrideRetention: o.overrideRetention,
+ }, opts...)
}
// BucketName returns the name of the bucket.
@@ -1049,6 +1136,82 @@ func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle {
return &o2
}
+// SoftDeleted returns an object handle that can be used to get an object that
+// has been soft deleted. To get a soft deleted object, the generation must be
+// set on the object using ObjectHandle.Generation.
+// Note that an error will be returned if a live object is queried using this.
+func (o *ObjectHandle) SoftDeleted() *ObjectHandle {
+ o2 := *o
+ o2.softDeleted = true
+ return &o2
+}
+
+// RestoreOptions allows you to set options when restoring an object.
+type RestoreOptions struct {
+ /// CopySourceACL indicates whether the restored object should copy the
+ // access controls of the source object. Only valid for buckets with
+ // fine-grained access. If uniform bucket-level access is enabled, setting
+ // CopySourceACL will cause an error.
+ CopySourceACL bool
+}
+
+// Restore will restore a soft-deleted object to a live object.
+// Note that you must specify a generation to use this method.
+func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*ObjectAttrs, error) {
+ if err := o.validate(); err != nil {
+ return nil, err
+ }
+
+ // Since the generation is required by restore calls, we set the default to
+ // 0 instead of a negative value, which returns a more descriptive error.
+ gen := o.gen
+ if o.gen == defaultGen {
+ gen = 0
+ }
+
+ // Restore is always idempotent because Generation is a required param.
+ sOpts := makeStorageOpts(true, o.retry, o.userProject)
+ return o.c.tc.RestoreObject(ctx, &restoreObjectParams{
+ bucket: o.bucket,
+ object: o.object,
+ gen: gen,
+ conds: o.conds,
+ copySourceACL: opts.CopySourceACL,
+ }, sOpts...)
+}
+
+// Move changes the name of the object to the destination name.
+// It can only be used to rename an object within the same bucket. The
+// bucket must have [HierarchicalNamespace] enabled to use this method.
+//
+// Any preconditions set on the ObjectHandle will be applied for the source
+// object. Set preconditions on the destination object using
+// [MoveObjectDestination.Conditions].
+//
+// This API is in preview and is not yet publicly available.
+func (o *ObjectHandle) Move(ctx context.Context, destination MoveObjectDestination) (*ObjectAttrs, error) {
+ if err := o.validate(); err != nil {
+ return nil, err
+ }
+
+ sOpts := makeStorageOpts(true, o.retry, o.userProject)
+ return o.c.tc.MoveObject(ctx, &moveObjectParams{
+ bucket: o.bucket,
+ srcObject: o.object,
+ dstObject: destination.Object,
+ srcConds: o.conds,
+ dstConds: destination.Conditions,
+ encryptionKey: o.encryptionKey,
+ }, sOpts...)
+}
+
+// MoveObjectDestination provides the destination object name and (optional) preconditions
+// for [ObjectHandle.Move].
+type MoveObjectDestination struct {
+ Object string
+ Conditions *Conditions
+}
+
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@@ -1088,6 +1251,10 @@ func (o *ObjectHandle) validate() error {
if !utf8.ValidString(o.object) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
}
+ // Names . and .. are not valid; see https://cloud.google.com/storage/docs/objects#naming
+ if o.object == "." || o.object == ".." {
+ return fmt.Errorf("storage: object name %q is not valid", o.object)
+ }
return nil
}
@@ -1165,6 +1332,7 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
Acl: toProtoObjectACL(o.ACL),
Metadata: o.Metadata,
CreateTime: toProtoTimestamp(o.Created),
+ FinalizeTime: toProtoTimestamp(o.Finalized),
CustomTime: toProtoTimestamp(o.CustomTime),
DeleteTime: toProtoTimestamp(o.Deleted),
RetentionExpireTime: toProtoTimestamp(o.RetentionExpirationTime),
@@ -1327,6 +1495,10 @@ type ObjectAttrs struct {
// Created is the time the object was created. This field is read-only.
Created time.Time
+ // Finalized is the time the object contents were finalized. This may differ
+ // from Created for appendable objects. This field is read-only.
+ Finalized time.Time
+
// Deleted is the time the object was deleted.
// If not deleted, it is the zero value. This field is read-only.
Deleted time.Time
@@ -1378,6 +1550,21 @@ type ObjectAttrs struct {
// Retention contains the retention configuration for this object.
// ObjectRetention cannot be configured or reported through the gRPC API.
Retention *ObjectRetention
+
+ // SoftDeleteTime is the time when the object became soft-deleted.
+ // Soft-deleted objects are only accessible on an object handle returned by
+ // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
+ // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
+ // This field is read-only.
+ SoftDeleteTime time.Time
+
+ // HardDeleteTime is the time when the object will be permanently deleted.
+ // Only set when an object becomes soft-deleted with a soft delete policy.
+ // Soft-deleted objects are only accessible on an object handle returned by
+ // ObjectHandle.SoftDeleted; if ObjectHandle.SoftDeleted has not been set,
+ // ObjectHandle.Attrs will return ErrObjectNotExist if the object is soft-deleted.
+ // This field is read-only.
+ HardDeleteTime time.Time
}
// ObjectRetention contains the retention configuration for this object.
@@ -1476,12 +1663,15 @@ func newObject(o *raw.Object) *ObjectAttrs {
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
+ Finalized: convertTime(o.TimeFinalized),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Etag: o.Etag,
CustomTime: convertTime(o.CustomTime),
ComponentCount: o.ComponentCount,
Retention: toObjectRetention(o.Retention),
+ SoftDeleteTime: convertTime(o.SoftDeleteTime),
+ HardDeleteTime: convertTime(o.HardDeleteTime),
}
}
@@ -1513,10 +1703,13 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()),
KMSKeyName: o.GetKmsKey(),
Created: convertProtoTime(o.GetCreateTime()),
+ Finalized: convertProtoTime(o.GetFinalizeTime()),
Deleted: convertProtoTime(o.GetDeleteTime()),
Updated: convertProtoTime(o.GetUpdateTime()),
CustomTime: convertProtoTime(o.GetCustomTime()),
ComponentCount: int64(o.ComponentCount),
+ SoftDeleteTime: convertProtoTime(o.GetSoftDeleteTime()),
+ HardDeleteTime: convertProtoTime(o.GetHardDeleteTime()),
}
}
@@ -1620,6 +1813,15 @@ type Query struct {
// for syntax details. When Delimiter is set in conjunction with MatchGlob,
// it must be set to /.
MatchGlob string
+
+ // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of
+ // prefixes returned by the query. Only applicable if Delimiter is set to /.
+ IncludeFoldersAsPrefixes bool
+
+ // SoftDeleted indicates whether to list soft-deleted objects.
+ // If true, only objects that have been soft-deleted will be listed.
+ // By default, soft-deleted objects are not listed.
+ SoftDeleted bool
}
// attrToFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1649,12 +1851,15 @@ var attrToFieldMap = map[string]string{
"CustomerKeySHA256": "customerEncryption",
"KMSKeyName": "kmsKeyName",
"Created": "timeCreated",
+ "Finalized": "timeFinalized",
"Deleted": "timeDeleted",
"Updated": "updated",
"Etag": "etag",
"CustomTime": "customTime",
"ComponentCount": "componentCount",
"Retention": "retention",
+ "HardDeleteTime": "hardDeleteTime",
+ "SoftDeleteTime": "softDeleteTime",
}
// attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1675,6 +1880,7 @@ var attrToProtoFieldMap = map[string]string{
"Deleted": "delete_time",
"ContentType": "content_type",
"Created": "create_time",
+ "Finalized": "finalize_time",
"CRC32C": "checksums.crc32c",
"MD5": "checksums.md5_hash",
"Updated": "update_time",
@@ -1687,6 +1893,8 @@ var attrToProtoFieldMap = map[string]string{
"CustomerKeySHA256": "customer_encryption",
"CustomTime": "custom_time",
"ComponentCount": "component_count",
+ "HardDeleteTime": "hard_delete_time",
+ "SoftDeleteTime": "soft_delete_time",
// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
// "MediaLink": "mediaLink",
// TODO: add object retention - b/308194853
@@ -1912,56 +2120,91 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
return nil
}
-func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
+// applySourceConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+// This is specifically for calls like Rewrite and Move which have a source and destination
+// object.
+func applySourceConds(method string, gen int64, conds *Conditions, call interface{}) error {
+ cval := reflect.ValueOf(call)
if gen >= 0 {
- call.SourceGeneration(gen)
+ if !setSourceGeneration(cval, gen) {
+ return fmt.Errorf("storage: %s: source generation not supported", method)
+ }
}
if conds == nil {
return nil
}
- if err := conds.validate("CopyTo source"); err != nil {
+ if err := conds.validate(method); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
- call.IfSourceGenerationMatch(conds.GenerationMatch)
+ if !setIfSourceGenerationMatch(cval, conds.GenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
+ }
case conds.GenerationNotMatch != 0:
- call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
+ if !setIfSourceGenerationNotMatch(cval, conds.GenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
+ }
case conds.DoesNotExist:
- call.IfSourceGenerationMatch(0)
+ if !setIfSourceGenerationMatch(cval, int64(0)) {
+ return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+ }
}
switch {
case conds.MetagenerationMatch != 0:
- call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
+ if !setIfSourceMetagenerationMatch(cval, conds.MetagenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
+ }
case conds.MetagenerationNotMatch != 0:
- call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
+ if !setIfSourceMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
+ }
}
return nil
}
-func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error {
+// applySourceCondsProto validates and attempts to set the conditions on a protobuf
+// message using protobuf reflection. This is specifically for RPCs which have separate
+// preconditions for source and destination objects (e.g. Rewrite and Move).
+func applySourceCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
+ rmsg := msg.ProtoReflect()
+
if gen >= 0 {
- call.SourceGeneration = gen
+ if !setConditionProtoField(rmsg, "source_generation", gen) {
+ return fmt.Errorf("storage: %s: generation not supported", method)
+ }
}
if conds == nil {
return nil
}
- if err := conds.validate("CopyTo source"); err != nil {
+ if err := conds.validate(method); err != nil {
return err
}
+
switch {
case conds.GenerationMatch != 0:
- call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch)
+ if !setConditionProtoField(rmsg, "if_source_generation_match", conds.GenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
+ }
case conds.GenerationNotMatch != 0:
- call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
+ if !setConditionProtoField(rmsg, "if_source_generation_not_match", conds.GenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
+ }
case conds.DoesNotExist:
- call.IfSourceGenerationMatch = proto.Int64(0)
+ if !setConditionProtoField(rmsg, "if_source_generation_match", int64(0)) {
+ return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+ }
}
switch {
case conds.MetagenerationMatch != 0:
- call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
+ if !setConditionProtoField(rmsg, "if_source_metageneration_match", conds.MetagenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
+ }
case conds.MetagenerationNotMatch != 0:
- call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
+ if !setConditionProtoField(rmsg, "if_source_metageneration_not_match", conds.MetagenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
+ }
}
return nil
}
@@ -2000,6 +2243,27 @@ func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
}
+// More methods to set source object precondition fields (used by Rewrite and Move APIs).
+func setSourceGeneration(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("SourceGeneration"), value)
+}
+
+func setIfSourceGenerationMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceGenerationMatch"), value)
+}
+
+func setIfSourceGenerationNotMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceGenerationNotMatch"), value)
+}
+
+func setIfSourceMetagenerationMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceMetagenerationMatch"), value)
+}
+
+func setIfSourceMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceMetagenerationNotMatch"), value)
+}
+
func setCondition(setter reflect.Value, value interface{}) bool {
if setter.IsValid() {
setter.Call([]reflect.Value{reflect.ValueOf(value)})
@@ -2076,6 +2340,26 @@ func (wb *withBackoff) apply(config *retryConfig) {
config.backoff = &wb.backoff
}
+// WithMaxAttempts configures the maximum number of times an API call can be made
+// in the case of retryable errors.
+// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5
+// times total (initial call plus 4 retries).
+// Without this setting, operations will continue retrying indefinitely
+// until either the context is canceled or a deadline is reached.
+func WithMaxAttempts(maxAttempts int) RetryOption {
+ return &withMaxAttempts{
+ maxAttempts: maxAttempts,
+ }
+}
+
+type withMaxAttempts struct {
+ maxAttempts int
+}
+
+func (wb *withMaxAttempts) apply(config *retryConfig) {
+ config.maxAttempts = &wb.maxAttempts
+}
+
// RetryPolicy describes the available policies for which operations should be
// retried. The default is `RetryIdempotent`.
type RetryPolicy int
@@ -2148,6 +2432,11 @@ type retryConfig struct {
backoff *gax.Backoff
policy RetryPolicy
shouldRetry func(err error) bool
+ maxAttempts *int
+ // maxRetryDuration, if set, specifies a deadline after which the request
+ // will no longer be retried. A value of 0 allows infinite retries.
+ // maxRetryDuration is currently only set by Writer.ChunkRetryDeadline.
+ maxRetryDuration time.Duration
}
func (r *retryConfig) clone() *retryConfig {
@@ -2165,9 +2454,11 @@ func (r *retryConfig) clone() *retryConfig {
}
return &retryConfig{
- backoff: bo,
- policy: r.policy,
- shouldRetry: r.shouldRetry,
+ backoff: bo,
+ policy: r.policy,
+ shouldRetry: r.shouldRetry,
+ maxAttempts: r.maxAttempts,
+ maxRetryDuration: r.maxRetryDuration,
}
}
@@ -2242,10 +2533,10 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
+// Note: gRPC is not supported.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
-
}
// bucketResourceName formats the given project ID and bucketResourceName ID
@@ -2341,3 +2632,25 @@ func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Mess
}
return nil
}
+
+// formatObjectErr checks if the provided error is NotFound and if so, wraps
+// it in an ErrObjectNotExist error. If not, formatObjectErr has no effect.
+func formatObjectErr(err error) error {
+ var e *googleapi.Error
+ if s, ok := status.FromError(err); (ok && s.Code() == codes.NotFound) ||
+ (errors.As(err, &e) && e.Code == http.StatusNotFound) {
+ return fmt.Errorf("%w: %w", ErrObjectNotExist, err)
+ }
+ return err
+}
+
+// formatBucketError checks if the provided error is NotFound and if so, wraps
+// it in an ErrBucketNotExist error. If not, formatBucketError has no effect.
+func formatBucketError(err error) error {
+ var e *googleapi.Error
+ if s, ok := status.FromError(err); (ok && s.Code() == codes.NotFound) ||
+ (errors.As(err, &e) && e.Code == http.StatusNotFound) {
+ return fmt.Errorf("%w: %w", ErrBucketNotExist, err)
+ }
+ return err
+}
diff --git a/vendor/cloud.google.com/go/storage/trace.go b/vendor/cloud.google.com/go/storage/trace.go
new file mode 100644
index 000000000..67858b086
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/trace.go
@@ -0,0 +1,93 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ internalTrace "cloud.google.com/go/internal/trace"
+ "cloud.google.com/go/storage/internal"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ otelcodes "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ storageOtelTracingDevVar = "GO_STORAGE_DEV_OTEL_TRACING"
+ defaultTracerName = "cloud.google.com/go/storage"
+ gcpClientRepo = "googleapis/google-cloud-go"
+ gcpClientArtifact = "cloud.google.com/go/storage"
+)
+
+// isOTelTracingDevEnabled checks the development flag until experimental feature is launched.
+// TODO: Remove development flag upon experimental launch.
+func isOTelTracingDevEnabled() bool {
+ return os.Getenv(storageOtelTracingDevVar) == "true"
+}
+
+func tracer() trace.Tracer {
+ return otel.Tracer(defaultTracerName, trace.WithInstrumentationVersion(internal.Version))
+}
+
+// startSpan creates a span and a context.Context containing the newly-created span.
+// If the context.Context provided in `ctx` contains a span then the newly-created
+// span will be a child of that span, otherwise it will be a root span.
+func startSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ name = appendPackageName(name)
+ // TODO: Remove internalTrace upon experimental launch.
+ if !isOTelTracingDevEnabled() {
+ ctx = internalTrace.StartSpan(ctx, name)
+ return ctx, nil
+ }
+ opts = append(opts, getCommonTraceOptions()...)
+ ctx, span := tracer().Start(ctx, name, opts...)
+ return ctx, span
+}
+
+// endSpan retrieves the current span from ctx and completes the span.
+// If an error occurs, the error is recorded as an exception span event for this span,
+// and the span status is set in the form of a code and a description.
+func endSpan(ctx context.Context, err error) {
+ // TODO: Remove internalTrace upon experimental launch.
+ if !isOTelTracingDevEnabled() {
+ internalTrace.EndSpan(ctx, err)
+ } else {
+ span := trace.SpanFromContext(ctx)
+ if err != nil {
+ span.SetStatus(otelcodes.Error, err.Error())
+ span.RecordError(err)
+ }
+ span.End()
+ }
+}
+
+// getCommonTraceOptions includes the common attributes used for Cloud Trace adoption tracking.
+func getCommonTraceOptions() []trace.SpanStartOption {
+ opts := []trace.SpanStartOption{
+ trace.WithAttributes(
+ attribute.String("gcp.client.version", internal.Version),
+ attribute.String("gcp.client.repo", gcpClientRepo),
+ attribute.String("gcp.client.artifact", gcpClientArtifact),
+ ),
+ }
+ return opts
+}
+
+func appendPackageName(spanName string) string {
+ return fmt.Sprintf("%s.%s", gcpClientArtifact, spanName)
+}
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
index aeb7ed418..5778a263b 100644
--- a/vendor/cloud.google.com/go/storage/writer.go
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -77,17 +77,42 @@ type Writer struct {
// For uploads of larger files, the Writer will attempt to retry if the
// request to upload a particular chunk fails with a transient error.
// If a single chunk has been attempting to upload for longer than this
- // deadline and the request fails, it will no longer be retried, and the error
- // will be returned to the caller. This is only applicable for files which are
- // large enough to require a multi-chunk resumable upload. The default value
- // is 32s. Users may want to pick a longer deadline if they are using larger
- // values for ChunkSize or if they expect to have a slow or unreliable
- // internet connection.
+ // deadline and the request fails, it will no longer be retried, and the
+ // error will be returned to the caller. This is only applicable for files
+ // which are large enough to require a multi-chunk resumable upload. The
+ // default value is 32s. Users may want to pick a longer deadline if they
+ // are using larger values for ChunkSize or if they expect to have a slow or
+ // unreliable internet connection.
//
// To set a deadline on the entire upload, use context timeout or
// cancellation.
ChunkRetryDeadline time.Duration
+ // ChunkTransferTimeout sets a per-chunk request timeout for resumable uploads.
+ //
+ // For resumable uploads, the Writer will terminate the request and attempt
+ // a retry if the request to upload a particular chunk stalls for longer than
+ // this duration. Retries may continue until the ChunkRetryDeadline is reached.
+ //
+ // ChunkTransferTimeout is not applicable to uploads made using a gRPC client.
+ //
+ // The default value is no timeout.
+ ChunkTransferTimeout time.Duration
+
+ // ForceEmptyContentType is an optional parameter that is used to disable
+ // auto-detection of Content-Type. By default, if a blank Content-Type
+ // is provided, then gax.DetermineContentType is called to sniff the type.
+ ForceEmptyContentType bool
+
+ // Append is a parameter to indicate whether the writer should use appendable
+ // object semantics for the new object generation. Appendable objects are
+ // visible on the first Write() call, and can be appended to until they are
+ // finalized. The object is finalized on a call to Close().
+ //
+ // Append is only supported for gRPC. This feature is in preview and is not
+ // yet available for general use.
+ Append bool
+
// ProgressFunc can be used to monitor the progress of a large write
// operation. If ProgressFunc is not nil and writing requires multiple
// calls to the underlying service (see
@@ -102,13 +127,15 @@ type Writer struct {
o *ObjectHandle
opened bool
+ closed bool
pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set.
obj *ObjectAttrs
- mu sync.Mutex
- err error
+ mu sync.Mutex
+ err error
+ flush func() (int64, error)
}
// Write appends to w. It implements the io.Writer interface.
@@ -147,6 +174,46 @@ func (w *Writer) Write(p []byte) (n int, err error) {
return n, err
}
+// Flush syncs all bytes currently in the Writer's buffer to Cloud Storage.
+// It returns the offset of bytes that have been currently synced to
+// Cloud Storage and an error.
+//
+// If Flush is never called, Writer will sync data automatically every
+// [Writer.ChunkSize] bytes and on [Writer.Close].
+//
+// [Writer.ProgressFunc] will be called on Flush if present.
+//
+// Do not call Flush concurrently with Write or Close. A single Writer is not
+// safe for unsynchronized use across threads.
+//
+// Flush is supported only on gRPC clients where [Writer.Append] is set
+// to true. This feature is in preview and is not yet available for general use.
+func (w *Writer) Flush() (int64, error) {
+ // Return error if Append is not true.
+ if !w.Append {
+ return 0, errors.New("storage: Flush not supported unless client uses gRPC and Append is set to true")
+ }
+ if w.closed {
+ return 0, errors.New("storage: Flush called on closed Writer")
+ }
+ // Return error if already in error state.
+ w.mu.Lock()
+ werr := w.err
+ w.mu.Unlock()
+ if werr != nil {
+ return 0, werr
+ }
+ // If Flush called before any bytes written, it should start the upload
+ // at zero bytes. This will make the object visible with zero length data.
+ if !w.opened {
+ err := w.openWriter()
+ w.progress(0)
+ return 0, err
+ }
+
+ return w.flush()
+}
+
// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
@@ -163,6 +230,7 @@ func (w *Writer) Close() error {
}
<-w.donec
+ w.closed = true
w.mu.Lock()
defer w.mu.Unlock()
trace.EndSpan(w.ctx, w.err)
@@ -177,21 +245,26 @@ func (w *Writer) openWriter() (err error) {
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
}
- isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
+ isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist)
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
params := &openWriterParams{
- ctx: w.ctx,
- chunkSize: w.ChunkSize,
- chunkRetryDeadline: w.ChunkRetryDeadline,
- bucket: w.o.bucket,
- attrs: &w.ObjectAttrs,
- conds: w.o.conds,
- encryptionKey: w.o.encryptionKey,
- sendCRC32C: w.SendCRC32C,
- donec: w.donec,
- setError: w.error,
- progress: w.progress,
- setObj: func(o *ObjectAttrs) { w.obj = o },
+ ctx: w.ctx,
+ chunkSize: w.ChunkSize,
+ chunkRetryDeadline: w.ChunkRetryDeadline,
+ chunkTransferTimeout: w.ChunkTransferTimeout,
+ bucket: w.o.bucket,
+ attrs: &w.ObjectAttrs,
+ conds: w.o.conds,
+ encryptionKey: w.o.encryptionKey,
+ sendCRC32C: w.SendCRC32C,
+ append: w.Append,
+ donec: w.donec,
+ setError: w.error,
+ progress: w.progress,
+ setObj: func(o *ObjectAttrs) { w.obj = o },
+ setFlush: func(f func() (int64, error)) { w.flush = f },
+ setPipeWriter: func(pw *io.PipeWriter) { w.pw = pw },
+ forceEmptyContentType: w.ForceEmptyContentType,
}
if err := w.ctx.Err(); err != nil {
return err // short-circuit
@@ -260,9 +333,9 @@ func (w *Writer) validateWriteAttrs() error {
}
// progress is a convenience wrapper that reports write progress to the Writer
-// ProgressFunc if it is set and progress is non-zero.
+// ProgressFunc if it is set.
func (w *Writer) progress(p int64) {
- if w.ProgressFunc != nil && p != 0 {
+ if w.ProgressFunc != nil {
w.ProgressFunc(p)
}
}
diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md
new file mode 100644
index 000000000..78bb35b3b
--- /dev/null
+++ b/vendor/cloud.google.com/go/testing.md
@@ -0,0 +1,237 @@
+# Testing Code that depends on Go Client Libraries
+
+The Go client libraries generated as a part of `cloud.google.com/go` all take
+the approach of returning concrete types instead of interfaces. That way, new
+fields and methods can be added to the libraries without breaking users. This
+document will go over some patterns that can be used to test code that depends
+on the Go client libraries.
+
+## Testing gRPC services using fakes
+
+*Note*: You can see the full
+[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake).
+
+The clients found in `cloud.google.com/go` are gRPC based, with a couple of
+notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
+and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
+Interactions with gRPC services can be faked by serving up your own in-memory
+server within your test. One benefit of using this approach is that you don’t
+need to define an interface in your runtime code; you can keep using
+concrete struct types. You instead define a fake server in your test code. For
+example, take a look at the following function:
+
+```go
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+
+ translate "cloud.google.com/go/translate/apiv3"
+ "github.com/googleapis/gax-go/v2"
+ translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
+ ctx := context.Background()
+ log.Printf("Translating %q to %q", text, targetLang)
+ req := &translatepb.TranslateTextRequest{
+ Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
+ TargetLanguageCode: "en-US",
+ Contents: []string{text},
+ }
+ resp, err := client.TranslateText(ctx, req)
+ if err != nil {
+ return "", fmt.Errorf("unable to translate text: %v", err)
+ }
+ translations := resp.GetTranslations()
+ if len(translations) != 1 {
+ return "", fmt.Errorf("expected only one result, got %d", len(translations))
+ }
+ return translations[0].TranslatedText, nil
+}
+```
+
+Here is an example of what a fake server implementation would look like for
+faking the interactions above:
+
+```go
+import (
+ "context"
+
+ translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type fakeTranslationServer struct {
+ translatepb.UnimplementedTranslationServiceServer
+}
+
+func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
+ resp := &translatepb.TranslateTextResponse{
+ Translations: []*translatepb.Translation{
+ &translatepb.Translation{
+ TranslatedText: "Hello World",
+ },
+ },
+ }
+ return resp, nil
+}
+```
+
+All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
+contains a similar `package.UnimplementedFooServer` type that is useful for
+creating fakes. By embedding the unimplemented server in the
+`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
+exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
+method you can “override” the default unimplemented behavior of the one RPC that
+you would like to be faked.
+
+The test itself does require a little bit of setup: start up a `net.Listener`,
+register the server, and tell the client library to call the server:
+
+```go
+import (
+ "context"
+ "net"
+ "testing"
+
+ translate "cloud.google.com/go/translate/apiv3"
+ "google.golang.org/api/option"
+ translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+func TestTranslateTextWithConcreteClient(t *testing.T) {
+ ctx := context.Background()
+
+ // Setup the fake server.
+ fakeTranslationServer := &fakeTranslationServer{}
+ l, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ gsrv := grpc.NewServer()
+ translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
+ fakeServerAddr := l.Addr().String()
+ go func() {
+ if err := gsrv.Serve(l); err != nil {
+ panic(err)
+ }
+ }()
+
+ // Create a client.
+ client, err := translate.NewTranslationClient(ctx,
+ option.WithEndpoint(fakeServerAddr),
+ option.WithoutAuthentication(),
+ option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Run the test.
+ text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if text != "Hello World" {
+ t.Fatalf("got %q, want Hello World", text)
+ }
+}
+```
+
+## Testing using mocks
+
+*Note*: You can see the full
+[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock).
+
+When mocking code you need to work with interfaces. Let’s create an interface
+for the `cloud.google.com/go/translate/apiv3` client used in the
+`TranslateTextWithConcreteClient` function mentioned in the previous section.
+The `translate.Client` has over a dozen methods but this code only uses one of
+them. Here is an interface that satisfies the interactions of the
+`translate.Client` in this function.
+
+```go
+type TranslationClient interface {
+ TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
+}
+```
+
+Now that we have an interface that satisfies the method being used we can
+rewrite the function signature to take the interface instead of the concrete
+type.
+
+```go
+func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
+// ...
+}
+```
+
+This allows a real `translate.Client` to be passed to the method in production
+and for a mock implementation to be passed in during testing. This pattern can
+be applied to any Go code, not just `cloud.google.com/go`. This is because
+interfaces in Go are implicitly satisfied. Structs in the client libraries can
+implicitly implement interfaces defined in your codebase. Let’s take a look at
+what it might look like to define a lightweight mock for the `TranslationClient`
+interface.
+
+```go
+import (
+ "context"
+ "testing"
+
+ "github.com/googleapis/gax-go/v2"
+ translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type mockClient struct{}
+
+func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
+ resp := &translatepb.TranslateTextResponse{
+ Translations: []*translatepb.Translation{
+ &translatepb.Translation{
+ TranslatedText: "Hello World",
+ },
+ },
+ }
+ return resp, nil
+}
+
+func TestTranslateTextWithAbstractClient(t *testing.T) {
+ client := &mockClient{}
+ text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if text != "Hello World" {
+ t.Fatalf("got %q, want Hello World", text)
+ }
+}
+```
+
+If you prefer to not write your own mocks there are mocking frameworks such as
+[golang/mock](https://github.com/golang/mock) which can generate mocks for you
+from an interface. As a word of caution though, try to not
+[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
+
+## Testing using emulators
+
+Some of the client libraries provided in `cloud.google.com/go` support running
+against a service emulator. The concept is similar to that of using fakes,
+mentioned above, but the server is managed for you. You just need to start it up
+and instruct the client library to talk to the emulator by setting a service
+specific emulator environment variable. Current services/environment-variables
+are:
+
+- bigtable: `BIGTABLE_EMULATOR_HOST`
+- datastore: `DATASTORE_EMULATOR_HOST`
+- firestore: `FIRESTORE_EMULATOR_HOST`
+- pubsub: `PUBSUB_EMULATOR_HOST`
+- spanner: `SPANNER_EMULATOR_HOST`
+- storage: `STORAGE_EMULATOR_HOST`
+ - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud.
+
+For more information on emulators please refer to the
+[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index a6675492b..1799c6ef2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,111 @@
# Release History
+## 1.19.1 (2025-09-11)
+
+### Bugs Fixed
+
+* Fixed resource identifier parsing for provider-specific resource hierarchies containing "resourceGroups" segments.
+
+### Other Changes
+
+* Improved error fall-back for improperly authored long-running operations.
+* Upgraded dependencies.
+
+## 1.19.0 (2025-08-21)
+
+### Features Added
+
+* Added `runtime.APIVersionLocationPath` to be set by clients that set the API version in the path.
+
+## 1.18.2 (2025-07-31)
+
+### Bugs Fixed
+
+* Fixed a case in which `BearerTokenPolicy` didn't ensure an authentication error is non-retriable
+
+## 1.18.1 (2025-07-10)
+
+### Bugs Fixed
+
+* Fixed incorrect request/response logging try info when logging a request that's being retried.
+* Fixed a data race in `ResourceID.String()`
+
+## 1.18.0 (2025-04-03)
+
+### Features Added
+
+* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token
+
+## 1.17.1 (2025-03-20)
+
+### Other Changes
+
+* Upgraded to Go 1.23
+* Upgraded dependencies
+
+## 1.17.0 (2025-01-07)
+
+### Features Added
+
+* Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern.
+* Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`.
+
+## 1.16.0 (2024-10-17)
+
+### Features Added
+
+* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span.
+
+### Bugs Fixed
+
+* `BearerTokenPolicy` now rewinds request bodies before retrying
+
+## 1.15.0 (2024-10-14)
+
+### Features Added
+
+* `BearerTokenPolicy` handles CAE claims challenges
+
+### Bugs Fixed
+
+* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled.
+* Fixed an integer overflow in the retry policy.
+
+### Other Changes
+
+* Update dependencies.
+
+## 1.14.0 (2024-08-07)
+
+### Features Added
+
+* Added field `Attributes` to `runtime.StartSpanOptions` to simplify creating spans with attributes.
+
+### Other Changes
+
+* Include the HTTP verb and URL in `log.EventRetryPolicy` log entries so it's clear which operation is being retried.
+
+## 1.13.0 (2024-07-16)
+
+### Features Added
+
+- Added runtime.NewRequestFromRequest(), allowing for a policy.Request to be created from an existing *http.Request.
+
+## 1.12.0 (2024-06-06)
+
+### Features Added
+
+* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success.
+* Added func `NewUUID` to the `runtime` package for generating UUIDs.
+
+### Bugs Fixed
+
+* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases.
+
+### Other Changes
+
+* Updated dependencies.
+
## 1.11.1 (2024-04-02)
### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
index 187fe82b9..b8348b7d8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
@@ -27,7 +27,8 @@ var RootResourceID = &ResourceID{
}
// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
-// Don't create this type directly, use ParseResourceID instead.
+// Don't create this type directly, use [ParseResourceID] instead. Fields are considered immutable and shouldn't be
+// modified after creation.
type ResourceID struct {
// Parent is the parent ResourceID of this instance.
// Can be nil if there is no parent.
@@ -85,29 +86,22 @@ func ParseResourceID(id string) (*ResourceID, error) {
// String returns the string of the ResourceID
func (id *ResourceID) String() string {
- if len(id.stringValue) > 0 {
- return id.stringValue
- }
-
- if id.Parent == nil {
- return ""
- }
+ return id.stringValue
+}
- builder := strings.Builder{}
- builder.WriteString(id.Parent.String())
+// MarshalText returns a textual representation of the ResourceID
+func (id *ResourceID) MarshalText() ([]byte, error) {
+ return []byte(id.String()), nil
+}
- if id.isChild {
- builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType()))
- if len(id.Name) > 0 {
- builder.WriteString(fmt.Sprintf("/%s", id.Name))
- }
- } else {
- builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name))
+// UnmarshalText decodes the textual representation of a ResourceID
+func (id *ResourceID) UnmarshalText(text []byte) error {
+ newId, err := ParseResourceID(string(text))
+ if err != nil {
+ return err
}
-
- id.stringValue = builder.String()
-
- return id.stringValue
+ *id = *newId
+ return nil
}
func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID {
@@ -129,9 +123,9 @@ func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTy
}
func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType {
- if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) {
+ if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) && isSubscriptionResource(parent) {
return ResourceGroupResourceType
- } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() {
+ } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && isTenantResource(parent) {
return SubscriptionResourceType
}
@@ -170,6 +164,15 @@ func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name s
id.isChild = isChild
id.ResourceType = resourceType
id.Name = name
+ id.stringValue = id.Parent.String()
+ if id.isChild {
+ id.stringValue += "/" + id.ResourceType.lastType()
+ if id.Name != "" {
+ id.stringValue += "/" + id.Name
+ }
+ } else {
+ id.stringValue += fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)
+ }
}
func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) {
@@ -179,12 +182,12 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err
if len(parts) == 1 {
// subscriptions and resourceGroups are not valid ids without their names
- if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) {
+ if strings.EqualFold(parts[0], subscriptionsKey) && isTenantResource(parent) || strings.EqualFold(parts[0], resourceGroupsLowerKey) && isSubscriptionResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
// resourceGroup must contain either child or provider resource type
- if parent.ResourceType.String() == ResourceGroupResourceType.String() {
+ if isResourceGroupResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
@@ -192,8 +195,8 @@ func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, err
}
if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) {
- //provider resource can only be on a tenant or a subscription parent
- if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() {
+ // provider resource can only be on a tenant or a subscription parent
+ if !isSubscriptionResource(parent) && !isTenantResource(parent) {
return nil, fmt.Errorf("invalid resource ID: %s", id)
}
@@ -222,3 +225,18 @@ func splitStringAndOmitEmpty(v, sep string) []string {
return r
}
+
+// isTenantResource returns true if the resourceID represents a tenant resource. The condition is resource ID matched with TenantResourceType and has no parent.
+func isTenantResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), TenantResourceType.String()) && resourceID.Parent == nil
+}
+
+// isSubscriptionResource returns true if the resourceID represents a subscription resource. The condition is resource ID matched with SubscriptionResourceType and its parent is a tenant resource.
+func isSubscriptionResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), SubscriptionResourceType.String()) && isTenantResource(resourceID.Parent)
+}
+
+// isResourceGroupResource returns true if the resourceID represents a resource group resource. The condition is resource ID matched with ResourceGroupResourceType and its parent is a subscription resource.
+func isResourceGroupResource(resourceID *ResourceID) bool {
+ return resourceID != nil && strings.EqualFold(resourceID.ResourceType.String(), ResourceGroupResourceType.String()) && isSubscriptionResource(resourceID.Parent)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
index 039b758bf..6a7c916b4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
@@ -34,18 +34,22 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr
InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP,
Scopes: []string{conf.Audience + "/.default"},
})
+ // we don't want to modify the underlying array in plOpts.PerRetry
perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1)
copy(perRetry, plOpts.PerRetry)
- plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy))
+ perRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy))
+ plOpts.PerRetry = perRetry
if !options.DisableRPRegistration {
regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions}
regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts)
if err != nil {
return azruntime.Pipeline{}, err
}
+ // we don't want to modify the underlying array in plOpts.PerCall
perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1)
copy(perCall, plOpts.PerCall)
- plOpts.PerCall = append(perCall, regPolicy)
+ perCall = append(perCall, regPolicy)
+ plOpts.PerCall = perCall
}
if plOpts.APIVersion.Name == "" {
plOpts.APIVersion.Name = "api-version"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
index 765fbc684..8ad3d5400 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
@@ -5,7 +5,6 @@ package runtime
import (
"context"
- "encoding/base64"
"fmt"
"net/http"
"strings"
@@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok
p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP,
AuthorizationHandler: azpolicy.AuthorizationHandler{
- OnChallenge: p.onChallenge,
- OnRequest: p.onRequest,
+ OnRequest: p.onRequest,
},
})
return p
}
-func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
- challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
- claims, err := parseChallenge(challenge)
- if err != nil {
- // the challenge contains claims we can't parse
- return err
- } else if claims != "" {
- // request a new token having the specified claims, send the request again
- return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
- }
- // auth challenge didn't include claims, so this is a simple authorization failure
- return azruntime.NewResponseError(res)
-}
-
// onRequest authorizes requests with one or more bearer tokens
func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
// authorize the request with a token for the primary tenant
- err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
+ err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes})
if err != nil || len(b.auxResources) == 0 {
return err
}
@@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic
func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
return b.btp.Do(req)
}
-
-// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
-// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
-// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
-func parseChallenge(wwwAuthenticate string) (string, error) {
- claims := ""
- var err error
- for _, param := range strings.Split(wwwAuthenticate, ",") {
- if _, after, found := strings.Cut(param, "claims="); found {
- if claims != "" {
- // The header contains multiple challenges, at least two of which specify claims. The specs allow this
- // but it's unclear what a client should do in this case and there's as yet no concrete example of it.
- err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
- break
- }
- // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
- claims = strings.Trim(after, `\"=`)
- // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
- if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
- claims = string(b)
- } else {
- err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
- break
- }
- }
- }
- return claims, err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
index 99348527b..b81b62103 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
@@ -27,3 +27,5 @@ extends:
template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: azcore
+ TriggeringPaths:
+ - /eng/
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
index 17bd50c67..03cb227d0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
@@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
+//
+// When marshaling instances, the RawResponse field will be omitted.
+// However, the contents returned by Error() will be preserved.
type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
index f2b296b6d..460170034 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
- Token string
+ // Token is the access token
+ Token string
+ // ExpiresOn indicates when the token expires
ExpiresOn time.Time
+ // RefreshOn is a suggested time to refresh the token.
+ // Clients should ignore this value when it's zero.
+ RefreshOn time.Time
}
// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index 3041984d9..9b3f5badb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -7,6 +7,7 @@
package exported
import (
+ "bytes"
"context"
"encoding/base64"
"errors"
@@ -67,6 +68,43 @@ func (ov opValues) get(value any) bool {
return ok
}
+// NewRequestFromRequest creates a new policy.Request with an existing *http.Request
+// Exported as runtime.NewRequestFromRequest().
+func NewRequestFromRequest(req *http.Request) (*Request, error) {
+ // populate values so that the same instance is propagated across policies
+ policyReq := &Request{req: req, values: opValues{}}
+
+ if req.Body != nil {
+ // we can avoid a body copy here if the underlying stream is already a
+ // ReadSeekCloser.
+ readSeekCloser, isReadSeekCloser := req.Body.(io.ReadSeekCloser)
+
+ if !isReadSeekCloser {
+ // since this is an already populated http.Request we want to copy
+ // over its body, if it has one.
+ bodyBytes, err := io.ReadAll(req.Body)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Body.Close(); err != nil {
+ return nil, err
+ }
+
+ readSeekCloser = NopCloser(bytes.NewReader(bodyBytes))
+ }
+
+ // SetBody also takes care of updating the http.Request's body
+ // as well, so they should stay in-sync from this point.
+ if err := policyReq.SetBody(readSeekCloser, req.Header.Get("Content-Type")); err != nil {
+ return nil, err
+ }
+ }
+
+ return policyReq, nil
+}
+
// NewRequest creates a new Request with the specified input.
// Exported as runtime.NewRequest().
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
@@ -80,7 +118,8 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Reque
if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
}
- return &Request{req: req}, nil
+ // populate values so that the same instance is propagated across policies
+ return &Request{req: req, values: opValues{}}, nil
}
// Body returns the original body specified when the Request was created.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
index 08a954587..8aec256bd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -117,12 +117,18 @@ type ResponseError struct {
StatusCode int
// RawResponse is the underlying HTTP response.
- RawResponse *http.Response
+ RawResponse *http.Response `json:"-"`
+
+ errMsg string
}
// Error implements the error interface for type ResponseError.
// Note that the message contents are not contractual and can change over time.
func (e *ResponseError) Error() string {
+ if e.errMsg != "" {
+ return e.errMsg
+ }
+
const separator = "--------------------------------------------------------------------------------"
// write the request method and URL with response status code
msg := &bytes.Buffer{}
@@ -163,5 +169,33 @@ func (e *ResponseError) Error() string {
}
fmt.Fprintln(msg, separator)
- return msg.String()
+ e.errMsg = msg.String()
+ return e.errMsg
+}
+
+// internal type used for marshaling/unmarshaling
+type responseError struct {
+ ErrorCode string `json:"errorCode"`
+ StatusCode int `json:"statusCode"`
+ ErrorMessage string `json:"errorMessage"`
+}
+
+func (e ResponseError) MarshalJSON() ([]byte, error) {
+ return json.Marshal(responseError{
+ ErrorCode: e.ErrorCode,
+ StatusCode: e.StatusCode,
+ ErrorMessage: e.Error(),
+ })
+}
+
+func (e *ResponseError) UnmarshalJSON(data []byte) error {
+ re := responseError{}
+ if err := json.Unmarshal(data, &re); err != nil {
+ return err
+ }
+
+ e.ErrorCode = re.ErrorCode
+ e.StatusCode = re.StatusCode
+ e.errMsg = re.ErrorMessage
+ return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
index ccd4794e9..a53462760 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
@@ -155,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
index 0d781b31d..8751b0514 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -131,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
index 51aede8a2..7f8d11b8b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
@@ -124,7 +124,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
return exported.NewResponseError(p.resp)
}
- return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out)
}
// SanitizePollerPath removes any fake-appended suffix from a URL's path.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
index 7a56c5211..048285275 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -119,5 +119,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
index ac1c0efb5..f49633189 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
@@ -40,12 +40,13 @@ type Poller[T any] struct {
OrigURL string `json:"origURL"`
Method string `json:"method"`
FinalState pollers.FinalStateVia `json:"finalState"`
+ ResultPath string `json:"resultPath"`
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Operation-Location poller.")
return &Poller[T]{pl: pl}, nil
@@ -82,6 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
OrigURL: resp.Request.URL.String(),
Method: resp.Request.Method,
FinalState: finalState,
+ ResultPath: resultPath,
CurState: curState,
}, nil
}
@@ -115,10 +117,9 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
var req *exported.Request
var err error
+
if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
- } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
- // no final GET required, terminal response should have it
} else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) {
return rlErr
} else if rl != "" {
@@ -134,6 +135,8 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
// if a final GET request has been created, execute it
if req != nil {
+ // no JSON path when making a final GET request
+ p.ResultPath = ""
resp, err := p.pl.Do(req)
if err != nil {
return err
@@ -141,5 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
index eb3cf651d..6a7a32e03 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
@@ -159,7 +159,7 @@ func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, up
// ResultHelper processes the response as success or failure.
// In the success case, it unmarshals the payload into either a new instance of T or out.
// In the failure case, it creates an *azcore.Response error from the response.
-func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
+func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error {
// short-circuit the simple success case with no response body to unmarshal
if resp.StatusCode == http.StatusNoContent {
return nil
@@ -176,6 +176,18 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
if err != nil {
return err
}
+
+ if jsonPath != "" && len(payload) > 0 {
+ // extract the payload from the specified JSON path.
+ // do this before the zero-length check in case there
+ // is no payload.
+ jsonBody := map[string]json.RawMessage{}
+ if err = json.Unmarshal(payload, &jsonBody); err != nil {
+ return err
+ }
+ payload = jsonBody[jsonPath]
+ }
+
if len(payload) == 0 {
return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 03691cbf0..8aebe5ce5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.11.1"
+ Version = "v1.19.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
index 8d9845358..368a2199e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
@@ -103,7 +103,7 @@ type RetryOptions struct {
// RetryDelay specifies the initial amount of delay to use before retrying an operation.
// The value is used only if the HTTP response does not contain a Retry-After header.
// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
- // The default value is four seconds. A value less than zero means no delay between retries.
+ // The default value is 800 milliseconds. A value less than zero means no delay between retries.
RetryDelay time.Duration
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
@@ -161,19 +161,20 @@ type BearerTokenOptions struct {
// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
type AuthorizationHandler struct {
- // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
- // from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
- // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
- // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
- // token from its credential according to its configuration.
+ // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest
+ // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request
+ // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context,
+ // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send
+ // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token
+ // from its credential according to its configuration.
OnRequest func(*Request, func(TokenRequestOptions) error) error
- // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
- // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
- // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
- // given credential. Implementations that need to perform I/O should use the Request's context, available from
- // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
- // the policy will return any 401 response to the client.
+ // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon
+ // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle.
+ // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the
+ // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given
+ // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When
+ // OnChallenge returns nil, the policy will send the Request again.
OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
index cffe692d7..c66fc0a90 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
@@ -32,6 +32,7 @@ type PagingHandler[T any] struct {
}
// Pager provides operations for iterating over paged responses.
+// Methods on this type are not safe for concurrent use.
type Pager[T any] struct {
current *T
handler PagingHandler[T]
@@ -94,6 +95,10 @@ type FetcherForNextLinkOptions struct {
// NextReq is the func to be called when requesting subsequent pages.
// Used for paged operations that have a custom next link operation.
NextReq func(context.Context, string) (*policy.Request, error)
+
+ // StatusCodes contains additional HTTP status codes indicating success.
+ // The default value is http.StatusOK.
+ StatusCodes []int
}
// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL.
@@ -105,10 +110,13 @@ type FetcherForNextLinkOptions struct {
func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) {
var req *policy.Request
var err error
+ if options == nil {
+ options = &FetcherForNextLinkOptions{}
+ }
if nextLink == "" {
req, err = firstReq(ctx)
} else if nextLink, err = EncodeQueryParams(nextLink); err == nil {
- if options != nil && options.NextReq != nil {
+ if options.NextReq != nil {
req, err = options.NextReq(ctx, nextLink)
} else {
req, err = NewRequest(ctx, http.MethodGet, nextLink)
@@ -121,7 +129,9 @@ func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, first
if err != nil {
return nil, err
}
- if !HasStatusCode(resp, http.StatusOK) {
+ successCodes := []int{http.StatusOK}
+ successCodes = append(successCodes, options.StatusCodes...)
+ if !HasStatusCode(resp, successCodes...) {
return nil, NewResponseError(resp)
}
return resp, nil
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
index e5309aa6c..c3646feb5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
@@ -16,9 +16,10 @@ import (
// APIVersionOptions contains options for API versions
type APIVersionOptions struct {
- // Location indicates where to set the version on a request, for example in a header or query param
+ // Location indicates where to set the version on a request, for example in a header or query param.
Location APIVersionLocation
- // Name is the name of the header or query parameter, for example "api-version"
+ // Name is the name of the header or query parameter, for example "api-version".
+ // For [APIVersionLocationPath] the value is not used.
Name string
}
@@ -30,6 +31,8 @@ const (
APIVersionLocationQueryParam = 0
// APIVersionLocationHeader indicates a header
APIVersionLocationHeader = 1
+ // APIVersionLocationPath indicates a path segment
+ APIVersionLocationPath = 2
)
// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version
@@ -55,7 +58,10 @@ type apiVersionPolicy struct {
// Do sets the request's API version, if the policy is configured to do so, replacing any prior value.
func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) {
- if a.version != "" {
+ // for API versions in the path, the client is responsible for
+ // setting the correct path segment with the version. so, if the
+ // location is path the policy is effectively a no-op.
+ if a.location != APIVersionLocationPath && a.version != "" {
if a.name == "" {
// user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions
return nil, errors.New("this client doesn't support overriding its API version")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index cb2a69528..547e5a327 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -4,9 +4,12 @@
package runtime
import (
+ "encoding/base64"
"errors"
"net/http"
+ "regexp"
"strings"
+ "sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
@@ -17,6 +20,11 @@ import (
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
+// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle
+// additional authentication challenges, or needing more control over authorization, should
+// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions].
+//
+// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation
type BearerTokenPolicy struct {
// mainResource is the resource to be retreived using the tenant specified in the credential
mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
@@ -43,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne
return tk, tk.ExpiresOn, nil
}
+// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it.
+var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool {
+ if tk.RefreshOn.IsZero() {
+ return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now())
+ }
+ // no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn
+ return tk.RefreshOn.Before(time.Now())
+}
+
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
@@ -51,11 +68,24 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
if opts == nil {
opts = &policy.BearerTokenOptions{}
}
+ ah := opts.AuthorizationHandler
+ if ah.OnRequest == nil {
+ // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge
+ // doesn't get a default so the policy can use a nil check to determine whether the caller
+ // provided an implementation.
+ ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
+ // authNZ sets EnableCAE: true in all cases, no need to duplicate that here
+ return authNZ(policy.TokenRequestOptions{Scopes: scopes})
+ }
+ }
+ mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{
+ ShouldRefresh: shouldRefresh,
+ })
return &BearerTokenPolicy{
- authzHandler: opts.AuthorizationHandler,
+ authzHandler: ah,
cred: cred,
scopes: scopes,
- mainResource: temporal.NewResource(acquire),
+ mainResource: mr,
allowHTTP: opts.InsecureAllowCredentialWithHTTP,
}
}
@@ -63,10 +93,13 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
return func(tro policy.TokenRequestOptions) error {
+ tro.EnableCAE = true
as := acquiringResourceState{p: b, req: req, tro: tro}
tk, err := b.mainResource.Get(as)
if err != nil {
- return err
+ // consider this error non-retriable because if it could be resolved by
+ // retrying authentication, the credential would have done so already
+ return errorinfo.NonRetriableError(err)
}
req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
return nil
@@ -86,12 +119,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
- var err error
- if b.authzHandler.OnRequest != nil {
- err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
- } else {
- err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
- }
+ err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
if err != nil {
return nil, errorinfo.NonRetriableError(err)
}
@@ -101,17 +129,54 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
+ res, err = b.handleChallenge(req, res, false)
+ return res, err
+}
+
+// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling
+// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge.
+// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the
+// AuthorizationHandler.
+func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) {
+ var err error
if res.StatusCode == http.StatusUnauthorized {
b.mainResource.Expire()
- if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
- if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
- res, err = req.Next()
+ if res.Header.Get(shared.HeaderWWWAuthenticate) != "" {
+ caeChallenge, parseErr := parseCAEChallenge(res)
+ if parseErr != nil {
+ return res, parseErr
+ }
+ switch {
+ case caeChallenge != nil:
+ authNZ := func(tro policy.TokenRequestOptions) error {
+ // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value
+ // will be empty at time of writing because CAE is the only feature involving claims. If in
+ // the future some client needs to specify unrelated claims, this function may need to merge
+ // them with the challenge claims.
+ tro.Claims = caeChallenge.params["claims"]
+ return b.authenticateAndAuthorize(req)(tro)
+ }
+ if err = b.authzHandler.OnRequest(req, authNZ); err == nil {
+ if err = req.RewindBody(); err == nil {
+ res, err = req.Next()
+ }
+ }
+ case b.authzHandler.OnChallenge != nil && !recursed:
+ if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
+ if err = req.RewindBody(); err == nil {
+ if res, err = req.Next(); err == nil {
+ res, err = b.handleChallenge(req, res, true)
+ }
+ }
+ } else {
+ // don't retry challenge handling errors
+ err = errorinfo.NonRetriableError(err)
+ }
+ default:
+ // return the response to the pipeline
}
}
}
- if err != nil {
- err = errorinfo.NonRetriableError(err)
- }
return res, err
}
@@ -121,3 +186,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error {
}
return nil
}
+
+// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none).
+// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError.
+func parseCAEChallenge(res *http.Response) (*authChallenge, error) {
+ var (
+ caeChallenge *authChallenge
+ err error
+ )
+ for _, c := range parseChallenges(res) {
+ if c.scheme == "Bearer" {
+ if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" {
+ if b, de := base64.StdEncoding.DecodeString(claims); de == nil {
+ c.params["claims"] = string(b)
+ caeChallenge = &c
+ } else {
+ // don't include the decoding error because it's something
+ // unhelpful like "illegal base64 data at input byte 42"
+ err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims))
+ }
+ break
+ }
+ }
+ }
+ return caeChallenge, err
+}
+
+var (
+ challenge, challengeParams *regexp.Regexp
+ once = &sync.Once{}
+)
+
+type authChallenge struct {
+ scheme string
+ params map[string]string
+}
+
+// parseChallenges assumes authentication challenges have quoted parameter values
+func parseChallenges(res *http.Response) []authChallenge {
+ once.Do(func() {
+ // matches challenges having quoted parameters, capturing scheme and parameters
+ challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`)
+ // captures parameter names and values in a match of the above expression
+ challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`)
+ })
+ parsed := []authChallenge{}
+ // WWW-Authenticate can have multiple values, each containing multiple challenges
+ for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) {
+ for _, sm := range challenge.FindAllStringSubmatch(h, -1) {
+ // sm is [challenge, scheme, params] (see regexp documentation on submatches)
+ c := authChallenge{
+ params: make(map[string]string),
+ scheme: sm[1],
+ }
+ for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) {
+ // sm is [key="value", key, value] (see regexp documentation on submatches)
+ c.params[sm[1]] = sm[2]
+ }
+ parsed = append(parsed, c)
+ }
+ }
+ return parsed
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
index 3df1c1218..f375195c4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
@@ -96,7 +96,10 @@ func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err erro
// StartSpanOptions contains the optional values for StartSpan.
type StartSpanOptions struct {
- // for future expansion
+ // Kind indicates the kind of Span.
+ Kind tracing.SpanKind
+ // Attributes contains key-value pairs of attributes for the span.
+ Attributes []tracing.Attribute
}
// StartSpan starts a new tracing span.
@@ -114,7 +117,6 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
// we MUST propagate the active tracer before returning so that the trace policy can access it
ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer)
- const newSpanKind = tracing.SpanKindInternal
if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil {
// per the design guidelines, if a SDK method Foo() calls SDK method Bar(),
// then the span for Bar() must be suppressed. however, if Bar() makes a REST
@@ -126,10 +128,19 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
return ctx, func(err error) {}
}
}
+
+ if options == nil {
+ options = &StartSpanOptions{}
+ }
+ if options.Kind == 0 {
+ options.Kind = tracing.SpanKindInternal
+ }
+
ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
- Kind: newSpanKind,
+ Kind: options.Kind,
+ Attributes: options.Attributes,
})
- ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
+ ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind)
return ctx, func(err error) {
if err != nil {
errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index 04d7bb4ec..4c3a31fea 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) {
}
func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
- delay := time.Duration((1< o.MaxRetryDelay {
+ delayFloat := float64(delay) * jitterMultiplier
+ if delayFloat > float64(math.MaxInt64) {
+ // the jitter pushed us over MaxInt64, so just use MaxInt64
+ delay = time.Duration(math.MaxInt64)
+ } else {
+ delay = time.Duration(delayFloat)
+ }
+
+ if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value
delay = o.MaxRetryDelay
}
+
return delay
}
@@ -102,7 +122,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
try := int32(1)
for {
resp = nil // reset
- log.Writef(log.EventRetryPolicy, "=====> Try=%d", try)
+ // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs
+ log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil)))
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index 03f76c9aa..a89ae9b7b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -50,8 +50,14 @@ const (
// NewPollerOptions contains the optional parameters for NewPoller.
type NewPollerOptions[T any] struct {
// FinalStateVia contains the final-state-via value for the LRO.
+ // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs.
FinalStateVia FinalStateVia
+ // OperationLocationResultPath contains the JSON path to the result's
+ // payload when it's included with the terminal success response.
+ // NOTE: only used for Operation-Location LROs.
+ OperationLocationResultPath string
+
// Response contains a preconstructed response type.
// The final payload will be unmarshaled into it and returned.
Response *T
@@ -85,7 +91,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !poller.StatusCodeValid(resp) {
- return nil, errors.New("the operation failed or was cancelled")
+ return nil, exported.NewResponseError(resp)
}
// determine the polling method
@@ -98,7 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
opr, err = async.New[T](pl, resp, options.FinalStateVia)
} else if op.Applicable(resp) {
// op poller must be checked before loc as it can also have a location header
- opr, err = op.New[T](pl, resp, options.FinalStateVia)
+ opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath)
} else if loc.Applicable(resp) {
opr, err = loc.New[T](pl, resp)
} else if body.Applicable(resp) {
@@ -172,7 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
} else if loc.CanResume(asJSON) {
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
- opr, _ = op.New[T](pl, nil, "")
+ opr, _ = op.New[T](pl, nil, "", "")
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}
@@ -200,6 +206,7 @@ type PollingHandler[T any] interface {
}
// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
+// Methods on this type are not safe for concurrent use.
type Poller[T any] struct {
op PollingHandler[T]
resp *http.Response
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index 06ac95b1b..7d34b7803 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -15,6 +15,7 @@ import (
"fmt"
"io"
"mime/multipart"
+ "net/http"
"net/textproto"
"net/url"
"path"
@@ -24,6 +25,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when
@@ -44,6 +46,11 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic
return exported.NewRequest(ctx, httpMethod, endpoint)
}
+// NewRequestFromRequest creates a new policy.Request with an existing *http.Request
+func NewRequestFromRequest(req *http.Request) (*policy.Request, error) {
+ return exported.NewRequestFromRequest(req)
+}
+
// EncodeQueryParams will parse and encode any query parameters in the specified URL.
// Any semicolons will automatically be escaped.
func EncodeQueryParams(u string) (string, error) {
@@ -263,3 +270,12 @@ func SkipBodyDownload(req *policy.Request) {
// CtxAPINameKey is used as a context key for adding/retrieving the API name.
type CtxAPINameKey = shared.CtxAPINameKey
+
+// NewUUID returns a new UUID using the RFC4122 algorithm.
+func NewUUID() (string, error) {
+ u, err := uuid.New()
+ if err != nil {
+ return "", err
+ }
+ return u.String(), nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md
new file mode 100644
index 000000000..567e6975b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md
@@ -0,0 +1,20 @@
+# Breaking Changes
+
+## v1.8.0
+
+### New errors from `NewManagedIdentityCredential` in some environments
+
+`NewManagedIdentityCredential` now returns an error when `ManagedIdentityCredentialOptions.ID` is set in a hosting environment whose managed identity API doesn't support user-assigned identities. `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. Returning an error instead prevents the credential authenticating an unexpected identity. The affected hosting environments are:
+ * Azure Arc
+ * Azure ML (when a resource or object ID is specified; client IDs are supported)
+ * Cloud Shell
+ * Service Fabric
+
+## v1.6.0
+
+### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios
+
+As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed
+identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint
+is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error
+response can appear in logs but doesn't indicate authentication failed.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index a8c2feb6d..217d279fd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,148 @@
# Release History
+## 1.13.0 (2025-10-07)
+
+### Features Added
+
+- Added `AzurePowerShellCredential`, which authenticates as the identity logged in to Azure PowerShell
+ (thanks [ArmaanMcleod](https://github.com/ArmaanMcleod))
+- When `AZURE_TOKEN_CREDENTIALS` is set to `ManagedIdentityCredential`, `DefaultAzureCredential` behaves the same as
+ does `ManagedIdentityCredential` when used directly. It doesn't apply special retry configuration or attempt to
+ determine whether IMDS is available. ([#25265](https://github.com/Azure/azure-sdk-for-go/issues/25265))
+
+### Breaking Changes
+
+* Removed the `WorkloadIdentityCredential` support for identity binding mode added in v1.13.0-beta.1.
+ It will return in v1.14.0-beta.1
+
+## 1.13.0-beta.1 (2025-09-17)
+
+### Features Added
+
+- Added `AzurePowerShellCredential`, which authenticates as the identity logged in to Azure PowerShell
+ (thanks [ArmaanMcleod](https://github.com/ArmaanMcleod))
+- `WorkloadIdentityCredential` supports identity binding mode ([#25056](https://github.com/Azure/azure-sdk-for-go/issues/25056))
+
+## 1.12.0 (2025-09-16)
+
+### Features Added
+- Added `DefaultAzureCredentialOptions.RequireAzureTokenCredentials`. `NewDefaultAzureCredential` returns an
+ error when this option is true and the environment variable `AZURE_TOKEN_CREDENTIALS` has no value.
+
+### Other Changes
+- `AzureDeveloperCLICredential` no longer hangs when AZD_DEBUG is set
+- `GetToken` methods of `AzureCLICredential` and `AzureDeveloperCLICredential` return an error when
+ `TokenRequestOptions.Claims` has a value because these credentials can't acquire a token in that
+ case. The error messages describe the action required to get a token.
+
+## 1.11.0 (2025-08-05)
+
+### Other Changes
+- `DefaultAzureCredential` tries its next credential when a dev tool credential such as
+ `AzureCLICredential` returns an error
+
+## 1.11.0-beta.1 (2025-07-15)
+
+### Features Added
+- `DefaultAzureCredential` allows selecting one of its credential types by name via environment variable
+ `AZURE_TOKEN_CREDENTIALS`. It will use only the selected type at runtime. For example, set
+ `AZURE_TOKEN_CREDENTIALS=WorkloadIdentityCredential` to have `DefaultAzureCredential` use only
+ `WorkloadIdentityCredential`.
+
+### Other Changes
+- By default, `ManagedIdentityCredential` retries IMDS requests for a maximum of ~70 seconds as recommended
+ in IMDS documentation. In previous versions, it would stop retrying after ~54 seconds by default.
+
+## 1.10.1 (2025-06-10)
+
+### Bugs Fixed
+- `AzureCLICredential` and `AzureDeveloperCLICredential` could wait indefinitely for subprocess output
+
+## 1.10.0 (2025-05-14)
+
+### Features Added
+- `DefaultAzureCredential` reads environment variable `AZURE_TOKEN_CREDENTIALS` to enable a subset of its credentials:
+ - `dev` selects `AzureCLICredential` and `AzureDeveloperCLICredential`
+ - `prod` selects `EnvironmentCredential`, `WorkloadIdentityCredential` and `ManagedIdentityCredential`
+
+## 1.9.0 (2025-04-08)
+
+### Features Added
+* `GetToken()` sets `AccessToken.RefreshOn` when the token provider specifies a value
+
+### Other Changes
+* `NewManagedIdentityCredential` logs the configured user-assigned identity, if any
+* Deprecated `UsernamePasswordCredential` because it can't support multifactor
+ authentication (MFA), which Microsoft Entra ID requires for most tenants. See
+ https://aka.ms/azsdk/identity/mfa for migration guidance.
+* Updated dependencies
+
+## 1.8.2 (2025-02-12)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.8.1 (2025-01-15)
+
+### Bugs Fixed
+* User credential types inconsistently log access token scopes
+* `DefaultAzureCredential` skips managed identity in Azure Container Instances
+* Credentials having optional tenant IDs such as `AzureCLICredential` and
+ `InteractiveBrowserCredential` require setting `AdditionallyAllowedTenants`
+ when used with some clients
+
+### Other Changes
+* `ChainedTokenCredential` and `DefaultAzureCredential` continue to their next
+ credential after `ManagedIdentityCredential` receives an unexpected response
+ from IMDS, indicating the response is from something else such as a proxy
+
+## 1.8.0 (2024-10-08)
+
+### Other Changes
+* `AzurePipelinesCredential` sets an additional OIDC request header so that it
+ receives a 401 instead of a 302 after presenting an invalid system access token
+* Allow logging of debugging headers for `AzurePipelinesCredential` and include
+ them in error messages
+
+## 1.8.0-beta.3 (2024-09-17)
+
+### Features Added
+* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID`
+
+### Other Changes
+* Removed redundant content from error messages
+
+## 1.8.0-beta.2 (2024-08-06)
+
+### Breaking Changes
+* `NewManagedIdentityCredential` now returns an error when a user-assigned identity
+ is specified on a platform whose managed identity API doesn't support that.
+ `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases.
+ Returning an error instead prevents the credential authenticating an unexpected
+ identity, causing a client to act with unexpected privileges. The affected
+ platforms are:
+ * Azure Arc
+ * Azure ML (when a resource ID is specified; client IDs are supported)
+ * Cloud Shell
+ * Service Fabric
+
+### Other Changes
+* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before
+ attempting to authenticate a managed identity, it continues to the next credential
+ in the chain instead of immediately returning an error.
+
+## 1.8.0-beta.1 (2024-07-17)
+
+### Features Added
+* Restored persistent token caching feature
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.7.0-beta.1
+* Redesigned the persistent caching API. Encryption is now required in all cases
+ and persistent cache construction is separate from credential construction.
+ The `PersistentUserAuthentication` example in the package docs has been updated
+ to demonstrate the new API.
+
## 1.7.0 (2024-06-20)
### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
index 4404be824..29b60baec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -304,4 +304,4 @@ client := subscriptions.NewClient()
client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"})
```
-
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index 7e201ea2f..127c25b72 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -1,6 +1,6 @@
# Azure Identity Client Module for Go
-The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+The Azure Identity module provides [Microsoft Entra ID](https://learn.microsoft.com/entra/fundamentals/whatis) token-based authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/)
@@ -21,7 +21,7 @@ go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
## Prerequisites
- an [Azure subscription](https://azure.microsoft.com/free/)
-- Go 1.18
+- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go
### Authenticating during local development
@@ -54,17 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.
### DefaultAzureCredential
-`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
-
-
-
-1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
-1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
-1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
-1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
-1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account.
-
-> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
+`DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview].
## Managed Identity
@@ -126,12 +116,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
## Credential Types
-### Authenticating Azure Hosted Applications
+### Credential chains
+
+|Credential|Usage|Reference
+|-|-|-
+|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]|
+|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]|
+
+### Authenticating Azure-Hosted Applications
|Credential|Usage
|-|-
-|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
-|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes
@@ -151,20 +146,20 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|-|-
|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
-|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
### Authenticating via Development Tools
|Credential|Usage
|-|-
|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
-|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
+|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
+|[AzurePowerShellCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzurePowerShellCredential)|Authenticates as the user signed in to Azure PowerShell
## Environment Variables
`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
-#### Service principal with secret
+### Service principal with secret
|variable name|value
|-|-
@@ -172,7 +167,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets
-#### Service principal with certificate
+### Service principal with certificate
|variable name|value
|-|-
@@ -181,16 +176,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
-#### Username and password
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_USERNAME`|a username (usually an email address)
-|`AZURE_PASSWORD`|that user's password
-
-Configuration is attempted in the above order. For example, if values for a
-client secret and certificate are both present, the client secret will be used.
+Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used.
## Token caching
@@ -255,4 +241,8 @@ For more information, see the
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
additional questions or comments.
-
+
+[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
+[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
+
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
index fbaa29220..8bdaf8165 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -1,62 +1,46 @@
## Token caching in the Azure Identity client module
-*Token caching* is a feature provided by the Azure Identity library that allows apps to:
+Token caching helps apps:
- Improve their resilience and performance.
-- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
-- Reduce the number of times the user is prompted to authenticate.
+- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens.
+- Reduce the number of times users are prompted to authenticate.
-When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
+When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use.
-Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
+#### Caching can't be disabled
-### In-memory token caching
-
-*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
-
-**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
+Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances.
-#### Caching cannot be disabled
+### In-memory token caching
-As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
+Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache.
### Persistent token caching
-> Only azidentity v1.5.0-beta versions support persistent token caching
-
-*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
+Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs.
-| Operating system | Storage mechanism |
-|------------------|---------------------------------------|
-| Linux | kernel key retention service (keyctl) |
-| macOS | Keychain |
-| Windows | DPAPI |
+Persistent caches are encrypted at rest using a mechanism that depends on the operating system:
-By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform.
-However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access.
+| Operating system | Encryption facility | Limitations |
+| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. |
+| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). |
+| Windows | Data Protection API (DPAPI) | No specific limitations. |
-With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
-
-- Makes the app more resilient to failures.
-- Ensures the app can continue to function during an Entra ID outage or disruption.
-- Avoids having to prompt users to authenticate each time the process is restarted.
-
->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
-
-#### Example code
-
-See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
+Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].
### Credentials supporting token caching
The following table indicates the state of in-memory and persistent caching in each credential type.
-**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
+**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].
| Credential | In-memory token caching | Persistent token caching |
-|--------------------------------|---------------------------------------------------------------------|--------------------------|
+| ------------------------------ | ------------------------------------------------------------------- | ------------------------ |
| `AzureCLICredential` | Not Supported | Not Supported |
| `AzureDeveloperCLICredential` | Not Supported | Not Supported |
+| `AzurePowerShellCredential` | Not Supported | Not Supported |
| `AzurePipelinesCredential` | Supported | Supported |
| `ClientAssertionCredential` | Supported | Supported |
| `ClientCertificateCredential` | Supported | Supported |
@@ -66,6 +50,8 @@ The following table indicates the state of in-memory and persistent caching in e
| `EnvironmentCredential` | Supported | Not Supported |
| `InteractiveBrowserCredential` | Supported | Supported |
| `ManagedIdentityCredential` | Supported | Not Supported |
-| `OnBehalfOfCredential` | Supported | Supported |
-| `UsernamePasswordCredential` | Supported | Supported |
+| `OnBehalfOfCredential` | Supported | Not Supported |
| `WorkloadIdentityCredential` | Supported | Supported |
+
+[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication
+[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 54016a070..517006a42 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,18 +8,18 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Permission issues](#permission-issues)
- [Find relevant information in errors](#find-relevant-information-in-errors)
- [Enable and configure logging](#enable-and-configure-logging)
+- [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues)
- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues)
+- [Troubleshoot AzurePowerShellCredential authentication issues](#troubleshoot-azurepowershellcredential-authentication-issues)
- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues)
- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues)
- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues)
- [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
- - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
- [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
-- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues)
- [Get additional help](#get-additional-help)
@@ -86,6 +86,7 @@ azlog.SetEvents(azidentity.EventAuthentication)
|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|- [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
- Consult the troubleshooting guide for underlying credential types for more information.
- [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
- [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
- [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
|
|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|- [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
- If an unexpected credential is returning a token, check application configuration such as environment variables.
- Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
|
|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.|
+|invalid AZURE_TOKEN_CREDENTIALS value "..."|AZURE_TOKEN_CREDENTIALS has an unexpected value|Specify a valid value as described in [DefaultAzureCredential documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)
## Troubleshoot EnvironmentCredential authentication issues
@@ -110,13 +111,6 @@ azlog.SetEvents(azidentity.EventAuthentication)
|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
-
-## Troubleshoot UsernamePasswordCredential authentication issues
-
-| Error Code | Issue | Mitigation |
-|---|---|---|
-|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
-
## Troubleshoot ManagedIdentityCredential authentication issues
@@ -126,7 +120,6 @@ azlog.SetEvents(azidentity.EventAuthentication)
|---|---|---|
|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
-|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
@@ -165,14 +158,6 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
-### Azure Kubernetes Service managed identity
-
-#### Pod Identity
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).
-
## Troubleshoot AzureCLICredential authentication issues
@@ -180,6 +165,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
|---|---|---|
|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|- Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).
- Validate the installation location is in the application's `PATH` environment variable.
|
|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|- Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).
- Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
|
+|Subscription "[your subscription]" contains invalid characters. If this is the name of a subscription, use its ID instead|The subscription name contains a character that may not be safe in a command line.|Use the subscription's ID instead of its name. You can get this from the Azure CLI: `az account show --name "[your subscription]" --query "id"`
#### Verify the Azure CLI can obtain tokens
@@ -220,12 +206,40 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
```
>Note that output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security.
+
+## Troubleshoot `AzurePowerShellCredential` authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|executable not found on path|No local installation of PowerShell was found.|Ensure that PowerShell is properly installed on the machine. Instructions for installing PowerShell can be found [here](https://learn.microsoft.com/powershell/scripting/install/installing-powershell).|
+|Az.Accounts module not found|The Az.Account module needed for authentication in Azure PowerShell isn't installed.|Install the latest Az.Account module. Installation instructions can be found [here](https://learn.microsoft.com/powershell/azure/install-az-ps).|
+|Please run "Connect-AzAccount" to set up account.|No account is currently logged into Azure PowerShell.|- Log in to Azure PowerShell using the `Connect-AzAccount` command. More instructions for authenticating Azure PowerShell can be found at [Sign in with Azure PowerShell](https://learn.microsoft.com/powershell/azure/authenticate-azureps).
- Validate that Azure PowerShell can obtain tokens. For instructions, see [Verify Azure PowerShell can obtain tokens](#verify-azure-powershell-can-obtain-tokens).
|
+
+#### __Verify Azure PowerShell can obtain tokens__
+
+You can manually verify that Azure PowerShell is authenticated and can obtain tokens. First, use the `Get-AzContext` command to verify the account that is currently logged in to Azure PowerShell.
+
+```
+PS C:\> Get-AzContext
+
+Name Account SubscriptionName Environment TenantId
+---- ------- ---------------- ----------- --------
+Subscription1 (xxxxxxxx-xxxx-xxxx-xxx... test@outlook.com Subscription1 AzureCloud xxxxxxxx-x...
+```
+
+Once you've verified Azure PowerShell is using correct account, validate that it's able to obtain tokens for this account:
+
+```bash
+Get-AzAccessToken -ResourceUrl "https://management.core.windows.net"
+```
+>Note that output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security.
+
## Troubleshoot `WorkloadIdentityCredential` authentication issues
| Error Message |Description| Mitigation |
|---|---|---|
-|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.- If your application runs on Azure Kubernetes Servide (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
- If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
+|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
- If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
- If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
## Troubleshoot AzurePipelinesCredential authentication issues
@@ -234,7 +248,30 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
|---|---|---|
| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.|
| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
-|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
+|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
+
+## Troubleshoot persistent token caching issues
+
+### macOS
+
+[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios.
+
+#### Build errors
+
+Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example:
+```
+$ GOOS=darwin go build
+# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache
+../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New
+../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount
+```
+
+Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools.
+
+#### Runtime errors
+
+macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like
+`persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs.
## Get additional help
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index bff0c44da..1646ff911 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_087379b475"
+ "Tag": "go/azidentity_530ea4279b"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
index ada4d6501..840a71469 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
@@ -18,10 +18,10 @@ import (
var supportedAuthRecordVersions = []string{"1.0"}
-// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
+// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as
// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
-// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
-type authenticationRecord struct {
+// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user.
+type AuthenticationRecord struct {
// Authority is the URL of the authority that issued the token.
Authority string `json:"authority"`
@@ -42,11 +42,11 @@ type authenticationRecord struct {
}
// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord
-func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
+func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error {
// Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we
// want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally
// different type enables this by assigning all the fields without recursing into this method.
- type r authenticationRecord
+ type r AuthenticationRecord
err := json.Unmarshal(b, (*r)(a))
if err != nil {
return err
@@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
}
// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued.
-func (a *authenticationRecord) account() public.Account {
+func (a *AuthenticationRecord) account() public.Account {
return public.Account{
Environment: a.Authority,
HomeAccountID: a.HomeAccountID,
@@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account {
}
}
-func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) {
+func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) {
u, err := url.Parse(ar.IDToken.Issuer)
if err != nil {
- return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
+ return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
}
tenant := ar.IDToken.TenantID
if tenant == "" {
@@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error)
if username == "" {
username = ar.IDToken.UPN
}
- return authenticationRecord{
+ return AuthenticationRecord{
Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host),
ClientID: ar.IDToken.Audience,
HomeAccountID: ar.Account.HomeAccountID,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
index b0965036b..bd196ddd3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)
@@ -42,6 +43,8 @@ const (
developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
defaultSuffix = "/.default"
+ scopeLogFmt = "%s.GetToken() acquired a token for scope %q"
+
traceNamespace = "Microsoft.Entra"
traceOpGetToken = "GetToken"
traceOpAuthenticate = "Authenticate"
@@ -53,8 +56,14 @@ var (
errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names")
)
-// tokenCachePersistenceOptions contains options for persistent token caching
-type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions
+// Cache represents a persistent cache that makes authentication data available across processes.
+// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
+// [persistent user authentication example] shows how to use a persistent cache to reuse user
+// logins across application runs. For service principal credential types such as
+// [ClientCertificateCredential], simply set the Cache field on the credential options.
+//
+// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
+type Cache = internal.Cache
// setAuthorityHost initializes the authority host for credentials. Precedence is:
// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user
@@ -97,7 +106,16 @@ func resolveAdditionalTenants(tenants []string) []string {
return cp
}
-// resolveTenant returns the correct tenant for a token request
+// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't
+// have an explicitly configured tenant and the caller didn't specify a tenant for the token request.
+//
+// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials
+// having an optional or implicit tenant such as dev tool and interactive user credentials. Those
+// default to the tool's configured tenant or the user's home tenant, respectively.
+// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "".
+// - credName: name of the calling credential type; for error messages
+// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in
+// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option
func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) {
if specified == "" || specified == defaultTenant {
return defaultTenant, nil
@@ -113,6 +131,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants
return specified, nil
}
}
+ if len(additionalTenants) == 0 {
+ switch defaultTenant {
+ case "", organizationsTenantID:
+ // The application didn't specify a tenant or allow list when constructing the credential. Allow the
+ // tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously
+ // satisfies the credential's configuration); don't know whether the application is multitenant; and
+ // don't want to return an error in the common case that the specified tenant matches the credential's
+ // default tenant determined elsewhere e.g., in some dev tool's configuration.
+ return specified, nil
+ }
+ }
return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified)
}
@@ -180,6 +209,10 @@ type msalConfidentialClient interface {
AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error)
}
+type msalManagedIdentityClient interface {
+ AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error)
+}
+
// enables fakes for test scenarios
type msalPublicClient interface {
AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index b9976f5fe..6944152c9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -7,14 +7,11 @@
package azidentity
import (
- "bytes"
"context"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
- "os"
- "os/exec"
- "runtime"
"strings"
"sync"
"time"
@@ -26,13 +23,11 @@ import (
const credNameAzureCLI = "AzureCLICredential"
-type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error)
-
// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
type AzureCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
// Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
@@ -45,15 +40,8 @@ type AzureCLICredentialOptions struct {
// inDefaultChain is true when the credential is part of DefaultAzureCredential
inDefaultChain bool
- // tokenProvider is used by tests to fake invoking az
- tokenProvider azTokenProvider
-}
-
-// init returns an instance of AzureCLICredentialOptions initialized with default values.
-func (o *AzureCLICredentialOptions) init() {
- if o.tokenProvider == nil {
- o.tokenProvider = defaultAzTokenProvider
- }
+ // exec is used by tests to fake invoking az
+ exec executor
}
// AzureCLICredential authenticates as the identity logged in to the Azure CLI.
@@ -70,13 +58,19 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
}
for _, r := range cp.Subscription {
if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') {
- return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription)
+ return nil, fmt.Errorf(
+ "%s: Subscription %q contains invalid characters. If this is the name of a subscription, use its ID instead",
+ credNameAzureCLI,
+ cp.Subscription,
+ )
}
}
if cp.TenantID != "" && !validTenantID(cp.TenantID) {
return nil, errInvalidTenantID
}
- cp.init()
+ if cp.exec == nil {
+ cp.exec = shellExec
+ }
cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants)
return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
}
@@ -95,14 +89,37 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ
if err != nil {
return at, err
}
+ // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
+ resource := strings.TrimSuffix(opts.Scopes[0], defaultSuffix)
+ command := "az account get-access-token -o json --resource " + resource
+ tenantArg := ""
+ if tenant != "" {
+ tenantArg = " --tenant " + tenant
+ command += tenantArg
+ }
+ if c.opts.Subscription != "" {
+ // subscription needs quotes because it may contain spaces
+ command += ` --subscription "` + c.opts.Subscription + `"`
+ }
+ if opts.Claims != "" {
+ encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims))
+ return at, fmt.Errorf(
+ "%s.GetToken(): Azure CLI requires multifactor authentication or additional claims. Run this command then retry the operation: az login%s --claims-challenge %s",
+ credNameAzureCLI,
+ tenantArg,
+ encoded,
+ )
+ }
+
c.mu.Lock()
defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription)
+
+ b, err := c.opts.exec(ctx, credNameAzureCLI, command)
if err == nil {
at, err = c.createAccessToken(b)
}
if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ err = unavailableIfInDAC(err, c.opts.inDefaultChain)
return at, err
}
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", "))
@@ -110,57 +127,6 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ
return at, nil
}
-// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) {
- // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
- resource := strings.TrimSuffix(scopes[0], defaultSuffix)
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "az account get-access-token -o json --resource " + resource
- if tenantID != "" {
- commandLine += " --tenant " + tenantID
- }
- if subscription != "" {
- // subscription needs quotes because it may contain spaces
- commandLine += ` --subscription "` + subscription + `"`
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
-
- output, err := cliCmd.Output()
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") {
- msg = "Azure CLI not found on path"
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureCLI, msg)
- }
-
- return output, nil
-}
-
func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
t := struct {
AccessToken string `json:"accessToken"`
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
index cbe7c4c2d..f97bf95df 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
@@ -7,14 +7,11 @@
package azidentity
import (
- "bytes"
"context"
+ "encoding/base64"
"encoding/json"
"errors"
"fmt"
- "os"
- "os/exec"
- "runtime"
"strings"
"sync"
"time"
@@ -24,15 +21,16 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
-
-type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error)
+const (
+ credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
+ mfaRequired = "Azure Developer CLI requires multifactor authentication or additional claims"
+)
// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
type AzureDeveloperCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
// TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment,
@@ -41,8 +39,8 @@ type AzureDeveloperCLICredentialOptions struct {
// inDefaultChain is true when the credential is part of DefaultAzureCredential
inDefaultChain bool
- // tokenProvider is used by tests to fake invoking azd
- tokenProvider azdTokenProvider
+ // exec is used by tests to fake invoking azd
+ exec executor
}
// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI].
@@ -62,8 +60,8 @@ func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions)
if cp.TenantID != "" && !validTenantID(cp.TenantID) {
return nil, errInvalidTenantID
}
- if cp.tokenProvider == nil {
- cp.tokenProvider = defaultAzdTokenProvider
+ if cp.exec == nil {
+ cp.exec = shellExec
}
return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
}
@@ -75,23 +73,52 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.
if len(opts.Scopes) == 0 {
return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope")
}
+ command := "azd auth token -o json --no-prompt"
for _, scope := range opts.Scopes {
if !validScope(scope) {
return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope)
}
+ command += " --scope " + scope
}
tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants)
if err != nil {
return at, err
}
+ if tenant != "" {
+ command += " --tenant-id " + tenant
+ }
+ commandNoClaims := command
+ if opts.Claims != "" {
+ encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims))
+ command += " --claims " + encoded
+ }
+
c.mu.Lock()
defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant)
+
+ b, err := c.opts.exec(ctx, credNameAzureDeveloperCLI, command)
if err == nil {
at, err = c.createAccessToken(b)
}
if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
+ msg := err.Error()
+ switch {
+ case strings.Contains(msg, "unknown flag: --claims"):
+ err = newAuthenticationFailedError(
+ credNameAzureDeveloperCLI,
+ mfaRequired+", however the installed version doesn't support this. Upgrade to version 1.18.1 or later",
+ nil,
+ )
+ case opts.Claims != "":
+ err = newAuthenticationFailedError(
+ credNameAzureDeveloperCLI,
+ mfaRequired+". Run this command then retry the operation: "+commandNoClaims,
+ nil,
+ )
+ case strings.Contains(msg, "azd auth login"):
+ err = newCredentialUnavailableError(credNameAzureDeveloperCLI, `please run "azd auth login" from a command prompt to authenticate before using this credential`)
+ }
+ err = unavailableIfInDAC(err, c.opts.inDefaultChain)
return at, err
}
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", "))
@@ -99,54 +126,6 @@ func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.
return at, nil
}
-// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) {
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "azd auth token -o json"
- if tenant != "" {
- commandLine += " --tenant-id " + tenant
- }
- for _, scope := range scopes {
- commandLine += " --scope " + scope
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
- output, err := cliCmd.Output()
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") {
- msg = "Azure Developer CLI not found on path"
- } else if strings.Contains(msg, "azd auth login") {
- msg = `please run "azd auth login" from a command prompt to authenticate before using this credential`
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
- }
- return output, nil
-}
-
func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
t := struct {
AccessToken string `json:"token"`
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
index 80c1806bb..a4b8ab6f4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
@@ -20,6 +20,8 @@ const (
credNameAzurePipelines = "AzurePipelinesCredential"
oidcAPIVersion = "7.1"
systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI"
+ xMsEdgeRef = "x-msedge-ref"
+ xVssE2eId = "x-vss-e2eid"
)
// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See
@@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system
if options == nil {
options = &AzurePipelinesCredentialOptions{}
}
+ // these headers are useful to the DevOps team when debugging OIDC error responses
+ options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId)
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
@@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er
url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID
url, err := runtime.EncodeQueryParams(url)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil)
}
req.Header.Set("Authorization", "Bearer "+a.systemAccessToken)
+ // instruct endpoint to return 401 instead of 302, if the system access token is invalid
+ req.Header.Set("X-TFS-FedAuthRedirect", "Suppress")
res, err := doForClient(a.cred.client.azClient, req)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil)
}
if res.StatusCode != http.StatusOK {
- msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration"
+ msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration."
+ for _, h := range []string{xMsEdgeRef, xVssE2eId} {
+ if v := res.Header.Get(h); v != "" {
+ msg += fmt.Sprintf("\n%s: %s", h, v)
+ }
+ }
// include the response because its body, if any, probably contains an error message.
// OK responses aren't included with errors because they probably contain secrets
- return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res)
}
b, err := runtime.Payload(res)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil)
}
var r struct {
OIDCToken string `json:"oidcToken"`
}
err = json.Unmarshal(b, &r)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil)
}
return r.OIDCToken, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_powershell_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_powershell_credential.go
new file mode 100644
index 000000000..082965554
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_powershell_credential.go
@@ -0,0 +1,234 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os/exec"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf16"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const (
+ credNameAzurePowerShell = "AzurePowerShellCredential"
+ noAzAccountModule = "Az.Accounts module not found"
+)
+
+// AzurePowerShellCredentialOptions contains optional parameters for AzurePowerShellCredential.
+type AzurePowerShellCredentialOptions struct {
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
+ AdditionallyAllowedTenants []string
+
+ // TenantID identifies the tenant the credential should authenticate in.
+ // Defaults to Azure PowerShell's default tenant, which is typically the home tenant of the logged in user.
+ TenantID string
+
+ // inDefaultChain is true when the credential is part of DefaultAzureCredential
+ inDefaultChain bool
+
+ // exec is used by tests to fake invoking Azure PowerShell
+ exec executor
+}
+
+// AzurePowerShellCredential authenticates as the identity logged in to Azure PowerShell.
+type AzurePowerShellCredential struct {
+ mu *sync.Mutex
+ opts AzurePowerShellCredentialOptions
+}
+
+// NewAzurePowerShellCredential constructs an AzurePowerShellCredential. Pass nil to accept default options.
+func NewAzurePowerShellCredential(options *AzurePowerShellCredentialOptions) (*AzurePowerShellCredential, error) {
+ cp := AzurePowerShellCredentialOptions{}
+
+ if options != nil {
+ cp = *options
+ }
+
+ if cp.TenantID != "" && !validTenantID(cp.TenantID) {
+ return nil, errInvalidTenantID
+ }
+
+ if cp.exec == nil {
+ cp.exec = shellExec
+ }
+
+ cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants)
+
+ return &AzurePowerShellCredential{mu: &sync.Mutex{}, opts: cp}, nil
+}
+
+// GetToken requests a token from Azure PowerShell. This credential doesn't cache tokens, so every call invokes Azure PowerShell.
+// This method is called automatically by Azure SDK clients.
+func (c *AzurePowerShellCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ at := azcore.AccessToken{}
+
+ if len(opts.Scopes) != 1 {
+ return at, errors.New(credNameAzurePowerShell + ": GetToken() requires exactly one scope")
+ }
+
+ if !validScope(opts.Scopes[0]) {
+ return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzurePowerShell, opts.Scopes[0])
+ }
+
+ tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzurePowerShell, c.opts.AdditionallyAllowedTenants)
+ if err != nil {
+ return at, err
+ }
+
+ // Always pass a Microsoft Entra ID v1 resource URI (not a v2 scope) because Get-AzAccessToken only supports v1 resource URIs.
+ resource := strings.TrimSuffix(opts.Scopes[0], defaultSuffix)
+
+ tenantArg := ""
+ if tenant != "" {
+ tenantArg = fmt.Sprintf(" -TenantId '%s'", tenant)
+ }
+
+ if opts.Claims != "" {
+ encoded := base64.StdEncoding.EncodeToString([]byte(opts.Claims))
+ return at, fmt.Errorf(
+ "%s.GetToken(): Azure PowerShell requires multifactor authentication or additional claims. Run this command then retry the operation: Connect-AzAccount%s -ClaimsChallenge '%s'",
+ credNameAzurePowerShell,
+ tenantArg,
+ encoded,
+ )
+ }
+
+ // Inline script to handle Get-AzAccessToken differences between Az.Accounts versions with SecureString handling and minimum version requirement
+ script := fmt.Sprintf(`
+$ErrorActionPreference = 'Stop'
+[version]$minimumVersion = '2.2.0'
+
+$mod = Import-Module Az.Accounts -MinimumVersion $minimumVersion -PassThru -ErrorAction SilentlyContinue
+
+if (-not $mod) {
+ Write-Error '%s'
+}
+
+$params = @{
+ ResourceUrl = '%s'
+ WarningAction = 'Ignore'
+}
+
+# Only force AsSecureString for Az.Accounts versions > 2.17.0 and < 5.0.0 which return plain text token by default.
+# Newer Az.Accounts versions return SecureString token by default and no longer use AsSecureString parameter.
+if ($mod.Version -ge [version]'2.17.0' -and $mod.Version -lt [version]'5.0.0') {
+ $params['AsSecureString'] = $true
+}
+
+$tenantId = '%s'
+if ($tenantId.Length -gt 0) {
+ $params['TenantId'] = '%s'
+}
+
+$token = Get-AzAccessToken @params
+
+$customToken = New-Object -TypeName psobject
+
+# The following .NET interop pattern is supported in all PowerShell versions and safely converts SecureString to plain text.
+if ($token.Token -is [System.Security.SecureString]) {
+ $ssPtr = [System.Runtime.InteropServices.Marshal]::SecureStringToBSTR($token.Token)
+ try {
+ $plainToken = [System.Runtime.InteropServices.Marshal]::PtrToStringBSTR($ssPtr)
+ } finally {
+ [System.Runtime.InteropServices.Marshal]::ZeroFreeBSTR($ssPtr)
+ }
+ $customToken | Add-Member -MemberType NoteProperty -Name Token -Value $plainToken
+} else {
+ $customToken | Add-Member -MemberType NoteProperty -Name Token -Value $token.Token
+}
+$customToken | Add-Member -MemberType NoteProperty -Name ExpiresOn -Value $token.ExpiresOn.ToUnixTimeSeconds()
+
+$jsonToken = $customToken | ConvertTo-Json
+return $jsonToken
+`, noAzAccountModule, resource, tenant, tenant)
+
+ // Windows: prefer pwsh.exe (PowerShell Core), fallback to powershell.exe (Windows PowerShell)
+ // Unix: only support pwsh (PowerShell Core)
+ exe := "pwsh"
+ if runtime.GOOS == "windows" {
+ if _, err := exec.LookPath("pwsh.exe"); err == nil {
+ exe = "pwsh.exe"
+ } else {
+ exe = "powershell.exe"
+ }
+ }
+
+ command := exe + " -NoProfile -NonInteractive -OutputFormat Text -EncodedCommand " + base64EncodeUTF16LE(script)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ b, err := c.opts.exec(ctx, credNameAzurePowerShell, command)
+ if err == nil {
+ at, err = c.createAccessToken(b)
+ }
+
+ if err != nil {
+ err = unavailableIfInDAC(err, c.opts.inDefaultChain)
+ return at, err
+ }
+
+ msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzurePowerShell, strings.Join(opts.Scopes, ", "))
+ log.Write(EventAuthentication, msg)
+
+ return at, nil
+}
+
+func (c *AzurePowerShellCredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
+ t := struct {
+ Token string `json:"Token"`
+ ExpiresOn int64 `json:"ExpiresOn"`
+ }{}
+
+ err := json.Unmarshal(tk, &t)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+
+ converted := azcore.AccessToken{
+ Token: t.Token,
+ ExpiresOn: time.Unix(t.ExpiresOn, 0).UTC(),
+ }
+
+ return converted, nil
+}
+
+// Encodes a string to Base64 using UTF-16LE encoding
+func base64EncodeUTF16LE(text string) string {
+ u16 := utf16.Encode([]rune(text))
+ buf := make([]byte, len(u16)*2)
+ for i, v := range u16 {
+ binary.LittleEndian.PutUint16(buf[i*2:], v)
+ }
+ return base64.StdEncoding.EncodeToString(buf)
+}
+
+// Decodes a Base64 UTF-16LE string back to string
+func base64DecodeUTF16LE(encoded string) (string, error) {
+ data, err := base64.StdEncoding.DecodeString(encoded)
+ if err != nil {
+ return "", err
+ }
+ u16 := make([]uint16, len(data)/2)
+ for i := range u16 {
+ u16[i] = binary.LittleEndian.Uint16(data[i*2:])
+ }
+ return string(utf16.Decode(u16)), nil
+}
+
+var _ azcore.TokenCredential = (*AzurePowerShellCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
index 6c35a941b..82342a025 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
@@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct {
}
// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default,
-// it tries all the credentials until one authenticates, after which it always uses that credential.
+// it tries all the credentials until one authenticates, after which it always uses that credential. For more information,
+// see [ChainedTokenCredential overview].
+//
+// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
type ChainedTokenCredential struct {
cond *sync.Cond
iterating bool
@@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine
if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil
return nil, errors.New("sources cannot contain nil")
}
+ if mc, ok := source.(*ManagedIdentityCredential); ok {
+ mc.mic.chained = true
+ }
}
cp := make([]azcore.TokenCredential, len(sources))
copy(cp, sources)
@@ -113,11 +119,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
if err != nil {
// return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise
msg := createChainedErrorMessage(errs)
- if errors.As(err, &unavailableErr) {
+ var authFailedErr *AuthenticationFailedError
+ switch {
+ case errors.As(err, &authFailedErr):
+ err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse)
+ if af, ok := err.(*AuthenticationFailedError); ok {
+ // stop Error() printing the response again; it's already in msg
+ af.omitResponse = true
+ }
+ case errors.As(err, &unavailableErr):
err = newCredentialUnavailableError(c.name, msg)
- } else {
+ default:
res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, msg, res, err)
+ err = newAuthenticationFailedError(c.name, msg, res)
}
}
return token, err
@@ -126,7 +140,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
func createChainedErrorMessage(errs []error) string {
msg := "failed to acquire a token.\nAttempted credentials:"
for _, err := range errs {
- msg += fmt.Sprintf("\n\t%s", err.Error())
+ msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t"))
}
return msg
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
index 4cd8c5144..51dd97939 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -27,20 +27,17 @@ extends:
CloudConfig:
Public:
SubscriptionConfigurations:
- - $(sub-config-azure-cloud-test-resources)
- $(sub-config-identity-test-resources)
- EnvVars:
- SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ EnableRaceDetector: true
+ Location: westus2
RunLiveTests: true
ServiceDirectory: azidentity
UsePipelineProxy: false
${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
+ PersistOidcToken: true
MatrixConfigs:
- Name: managed_identity_matrix
GenerateVMJobs: true
Path: sdk/azidentity/managed-identity-matrix.json
Selection: sparse
- MatrixReplace:
- - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi
- - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
index b588750ef..2307da86f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
@@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults.
@@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c
},
)
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
index 80cd96b56..9e6bca1c9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
@@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct {
// header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
// Defaults to False.
SendCertificateChain bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// ClientCertificateCredential authenticates a service principal with a certificate.
@@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- SendX5C: options.SendCertificateChain,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ SendX5C: options.SendCertificateChain,
}
c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
index 9e6772e9b..f0890fe1e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
@@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct {
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
}
// ClientSecretCredential authenticates an application with a client secret.
@@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 3bd08c685..58c4b585c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -29,8 +29,8 @@ type confidentialClientOptions struct {
AdditionallyAllowedTenants []string
// Assertion for on-behalf-of authentication
Assertion string
+ Cache Cache
DisableInstanceDiscovery, SendX5C bool
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// confidentialClient wraps the MSAL confidential client
@@ -107,18 +107,18 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
}
}
if err != nil {
- // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
- // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, err.Error(), res, err)
+ var (
+ authFailedErr *AuthenticationFailedError
+ unavailableErr credentialUnavailable
+ )
+ if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) {
+ err = newAuthenticationFailedErrorFromMSAL(c.name, err)
}
} else {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", "))
+ msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
@@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide
}
func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) {
- cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE)
+ cache, err := internal.ExportReplace(c.opts.Cache, enableCAE)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index 551d31994..aaaabc5c2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -8,6 +8,7 @@ package azidentity
import (
"context"
+ "fmt"
"os"
"strings"
@@ -16,6 +17,18 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
+const azureTokenCredentials = "AZURE_TOKEN_CREDENTIALS"
+
+// bit flags NewDefaultAzureCredential uses to parse AZURE_TOKEN_CREDENTIALS
+const (
+ env = uint8(1) << iota
+ workloadIdentity
+ managedIdentity
+ az
+ azd
+ azurePowerShell
+)
+
// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential.
// These options may not apply to all credentials in the chain.
type DefaultAzureCredentialOptions struct {
@@ -23,23 +36,34 @@ type DefaultAzureCredentialOptions struct {
// to credential types that authenticate via external tools such as the Azure CLI.
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add
- // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be
- // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
+ // This value can also be set as a semicolon delimited list of tenants in the environment variable
+ // AZURE_ADDITIONALLY_ALLOWED_TENANTS.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
- // TenantID sets the default tenant for authentication via the Azure CLI and workload identity.
+
+ // RequireAzureTokenCredentials determines whether NewDefaultAzureCredential returns an error when the environment
+ // variable AZURE_TOKEN_CREDENTIALS has no value.
+ RequireAzureTokenCredentials bool
+
+ // TenantID sets the default tenant for authentication via the Azure CLI, Azure Developer CLI, and workload identity.
TenantID string
}
-// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure.
-// It combines credentials suitable for deployment with credentials suitable for local development.
-// It attempts to authenticate with each of these credential types, in the following order, stopping
-// when one provides a token:
+// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by
+// combining credentials used in Azure hosting environments and credentials used in local development. In
+// production, it's better to use a specific credential type so authentication is more predictable and easier
+// to debug. For more information, see [DefaultAzureCredential overview].
+//
+// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order,
+// stopping when one provides a token:
//
// - [EnvironmentCredential]
// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload
@@ -48,83 +72,156 @@ type DefaultAzureCredentialOptions struct {
// - [ManagedIdentityCredential]
// - [AzureCLICredential]
// - [AzureDeveloperCLICredential]
+// - [AzurePowerShellCredential]
//
// Consult the documentation for these credential types for more information on how they authenticate.
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
// every subsequent authentication.
+//
+// # Selecting credentials
+//
+// Set environment variable AZURE_TOKEN_CREDENTIALS to select a subset of the credential chain described above.
+// DefaultAzureCredential will try only the specified credential(s), but its other behavior remains the same.
+// Valid values for AZURE_TOKEN_CREDENTIALS are the name of any single type in the above chain, for example
+// "EnvironmentCredential" or "AzureCLICredential", and these special values:
+//
+// - "dev": try [AzureCLICredential], [AzureDeveloperCLICredential], and [AzurePowerShellCredential], in that order
+// - "prod": try [EnvironmentCredential], [WorkloadIdentityCredential], and [ManagedIdentityCredential], in that order
+//
+// [DefaultAzureCredentialOptions].RequireAzureTokenCredentials controls whether AZURE_TOKEN_CREDENTIALS must be set.
+// NewDefaultAzureCredential returns an error when RequireAzureTokenCredentials is true and AZURE_TOKEN_CREDENTIALS
+// has no value.
+//
+// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
type DefaultAzureCredential struct {
chain *ChainedTokenCredential
}
// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults.
func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) {
- var creds []azcore.TokenCredential
- var errorMessages []string
-
if options == nil {
options = &DefaultAzureCredentialOptions{}
}
+
+ var (
+ creds []azcore.TokenCredential
+ errorMessages []string
+ selected = env | workloadIdentity | managedIdentity | az | azd | azurePowerShell
+ )
+
+ if atc, ok := os.LookupEnv(azureTokenCredentials); ok {
+ switch {
+ case atc == "dev":
+ selected = az | azd | azurePowerShell
+ case atc == "prod":
+ selected = env | workloadIdentity | managedIdentity
+ case strings.EqualFold(atc, credNameEnvironment):
+ selected = env
+ case strings.EqualFold(atc, credNameWorkloadIdentity):
+ selected = workloadIdentity
+ case strings.EqualFold(atc, credNameManagedIdentity):
+ selected = managedIdentity
+ case strings.EqualFold(atc, credNameAzureCLI):
+ selected = az
+ case strings.EqualFold(atc, credNameAzureDeveloperCLI):
+ selected = azd
+ case strings.EqualFold(atc, credNameAzurePowerShell):
+ selected = azurePowerShell
+ default:
+ return nil, fmt.Errorf(`invalid %s value %q. Valid values are "dev", "prod", or the name of any credential type in the default chain. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information`, azureTokenCredentials, atc)
+ }
+ } else if options.RequireAzureTokenCredentials {
+ return nil, fmt.Errorf("%s must be set when RequireAzureTokenCredentials is true. See https://aka.ms/azsdk/go/identity/docs#DefaultAzureCredential for more information", azureTokenCredentials)
+ }
+
additionalTenants := options.AdditionallyAllowedTenants
if len(additionalTenants) == 0 {
if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" {
additionalTenants = strings.Split(tenants, ";")
}
}
-
- envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- additionallyAllowedTenants: additionalTenants,
- })
- if err == nil {
- creds = append(creds, envCred)
- } else {
- errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
+ if selected&env != 0 {
+ envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ additionallyAllowedTenants: additionalTenants,
+ })
+ if err == nil {
+ creds = append(creds, envCred)
+ } else {
+ errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameEnvironment, err: err})
+ }
}
-
- wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, wic)
- } else {
- errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
+ if selected&workloadIdentity != 0 {
+ wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ TenantID: options.TenantID,
+ })
+ if err == nil {
+ creds = append(creds, wic)
+ } else {
+ errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
+ }
}
-
- o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
- if ID, ok := os.LookupEnv(azureClientID); ok {
- o.ID = ClientID(ID)
+ if selected&managedIdentity != 0 {
+ o := &ManagedIdentityCredentialOptions{
+ ClientOptions: options.ClientOptions,
+ // enable special DefaultAzureCredential behavior (IMDS probing) only when the chain contains another credential
+ dac: selected^managedIdentity != 0,
+ }
+ if ID, ok := os.LookupEnv(azureClientID); ok {
+ o.ID = ClientID(ID)
+ }
+ miCred, err := NewManagedIdentityCredential(o)
+ if err == nil {
+ creds = append(creds, miCred)
+ } else {
+ errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
+ }
}
- miCred, err := NewManagedIdentityCredential(o)
- if err == nil {
- creds = append(creds, miCred)
- } else {
- errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
+ if selected&az != 0 {
+ azCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ inDefaultChain: true,
+ })
+ if err == nil {
+ creds = append(creds, azCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
+ }
}
-
- cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID})
- if err == nil {
- creds = append(creds, cliCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
+ if selected&azd != 0 {
+ azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ inDefaultChain: true,
+ })
+ if err == nil {
+ creds = append(creds, azdCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
+ }
}
-
- azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, azdCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
+ if selected&azurePowerShell != 0 {
+ azurePowerShellCred, err := NewAzurePowerShellCredential(&AzurePowerShellCredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ inDefaultChain: true,
+ })
+ if err == nil {
+ creds = append(creds, azurePowerShellCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzurePowerShell+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzurePowerShell, err: err})
+ }
}
if len(errorMessages) > 0 {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
index be963d3a2..e2ca8bced 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
@@ -7,22 +7,81 @@
package azidentity
import (
+ "bytes"
+ "context"
"errors"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
"time"
)
// cliTimeout is the default timeout for authentication attempts via CLI tools
const cliTimeout = 10 * time.Second
-// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a
+// executor runs a command and returns its output or an error
+type executor func(ctx context.Context, credName, command string) ([]byte, error)
+
+var shellExec = func(ctx context.Context, credName, command string) ([]byte, error) {
+ // set a default timeout for this authentication iff the caller hasn't done so already
+ var cancel context.CancelFunc
+ if _, hasDeadline := ctx.Deadline(); !hasDeadline {
+ ctx, cancel = context.WithTimeout(ctx, cliTimeout)
+ defer cancel()
+ }
+ var cmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ dir := os.Getenv("SYSTEMROOT")
+ if dir == "" {
+ return nil, newCredentialUnavailableError(credName, `environment variable "SYSTEMROOT" has no value`)
+ }
+ cmd = exec.CommandContext(ctx, "cmd.exe", "/c", command)
+ cmd.Dir = dir
+ } else {
+ cmd = exec.CommandContext(ctx, "/bin/sh", "-c", command)
+ cmd.Dir = "/bin"
+ }
+ cmd.Env = os.Environ()
+ stderr := bytes.Buffer{}
+ cmd.Stderr = &stderr
+ cmd.WaitDelay = 100 * time.Millisecond
+
+ stdout, err := cmd.Output()
+ if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
+ // The child process wrote to stdout and exited without closing it.
+ // Swallow this error and return stdout because it may contain a token.
+ return stdout, nil
+ }
+ if err != nil {
+ msg := stderr.String()
+ var exErr *exec.ExitError
+ if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.Contains(msg, "' is not recognized") {
+ return nil, newCredentialUnavailableError(credName, "executable not found on path")
+ }
+ if credName == credNameAzurePowerShell {
+ if strings.Contains(msg, "Connect-AzAccount") {
+ msg = `Please run "Connect-AzAccount" to set up an account`
+ }
+ if strings.Contains(msg, noAzAccountModule) {
+ msg = noAzAccountModule
+ }
+ }
+ if msg == "" {
+ msg = err.Error()
+ }
+ return nil, newAuthenticationFailedError(credName, msg, nil)
+ }
+
+ return stdout, nil
+}
+
+// unavailableIfInDAC returns err or, if the credential was invoked by DefaultAzureCredential, a
// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try
// the next credential in its chain (another developer credential).
-func unavailableIfInChain(err error, inDefaultChain bool) error {
- if err != nil && inDefaultChain {
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
- }
+func unavailableIfInDAC(err error, inDefaultChain bool) error {
+ if err != nil && inDefaultChain && !errors.As(err, new(credentialUnavailable)) {
+ err = NewCredentialUnavailableError(err.Error())
}
return err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
index cd30bedd5..53ae9767f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -21,22 +21,31 @@ const credNameDeviceCode = "DeviceCodeCredential"
type DeviceCodeCredentialOptions struct {
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
+ // ClientID is the ID of the application to which users will authenticate. When not set, users
+ // will authenticate to an Azure development application, which isn't recommended for production
+ // scenarios. In production, developers should instead register their applications and assign
+ // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
+ // information.
ClientID string
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+ // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
- disableAutomaticAuthentication bool
+ DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@@ -49,9 +58,6 @@ type DeviceCodeCredentialOptions struct {
// applications.
TenantID string
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-
// UserPrompt controls how the credential presents authentication instructions. The credential calls
// this function with authentication details when it receives a device code. By default, the credential
// prints these details to stdout.
@@ -101,12 +107,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
DeviceCodePrompt: cp.UserPrompt,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
- Record: cp.authenticationRecord,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
+ Record: cp.AuthenticationRecord,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts)
if err != nil {
@@ -116,8 +122,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
return &DeviceCodeCredential{client: c}, nil
}
-// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+// Authenticate prompts a user to log in via the device code flow. Subsequent
+// GetToken calls will automatically use the returned AuthenticationRecord.
+func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index b30f5474f..f04d40ea4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -18,7 +18,10 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
+const (
+ credNameEnvironment = "EnvironmentCredential"
+ envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
+)
// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential
type EnvironmentCredentialOptions struct {
@@ -60,21 +63,13 @@ type EnvironmentCredentialOptions struct {
// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
//
-// # User with username and password
-//
-// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
-//
-// AZURE_CLIENT_ID: client ID of the application the user will authenticate to
-//
-// AZURE_USERNAME: a username (usually an email address)
-//
-// AZURE_PASSWORD: the user's password
-//
// # Configuration for multitenant applications
//
// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants
// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set
// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type EnvironmentCredential struct {
cred azcore.TokenCredential
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
index 35fa01d13..33cb63be0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -38,18 +38,30 @@ type AuthenticationFailedError struct {
// RawResponse is the HTTP response motivating the error, if available.
RawResponse *http.Response
- credType string
- message string
- err error
+ credType, message string
+ omitResponse bool
}
-func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error {
- return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err}
+func newAuthenticationFailedError(credType, message string, resp *http.Response) error {
+ return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp}
+}
+
+// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error.
+// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error
+// message, because that message is redundant given the response. If the original error isn't a
+// CallErr, the returned error incorporates its message.
+func newAuthenticationFailedErrorFromMSAL(credType string, err error) error {
+ msg := ""
+ res := getResponseFromError(err)
+ if res == nil {
+ msg = err.Error()
+ }
+ return newAuthenticationFailedError(credType, msg, res)
}
// Error implements the error interface. Note that the message contents are not contractual and can change over time.
func (e *AuthenticationFailedError) Error() string {
- if e.RawResponse == nil {
+ if e.RawResponse == nil || e.omitResponse {
return e.credType + ": " + e.message
}
msg := &bytes.Buffer{}
@@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string {
fmt.Fprintln(msg, "Request information not available")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
- fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
+ fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
body, err := runtime.Payload(e.RawResponse)
switch {
@@ -87,12 +99,12 @@ func (e *AuthenticationFailedError) Error() string {
anchor = "apc"
case credNameCert:
anchor = "client-cert"
+ case credNameAzurePowerShell:
+ anchor = "azure-pwsh"
case credNameSecret:
anchor = "client-secret"
case credNameManagedIdentity:
anchor = "managed-id"
- case credNameUserPassword:
- anchor = "username-password"
case credNameWorkloadIdentity:
anchor = "workload"
}
@@ -109,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() {
var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
-// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
+// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
// because the credential requires user interaction and is configured not to request it automatically.
-type authenticationRequiredError struct {
+type AuthenticationRequiredError struct {
credentialUnavailableError
// TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
TokenRequestOptions policy.TokenRequestOptions
}
-func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
- return &authenticationRequiredError{
+func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
+ return &AuthenticationRequiredError{
credentialUnavailableError: credentialUnavailableError{
credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
},
@@ -128,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti
}
var (
- _ credentialUnavailable = (*authenticationRequiredError)(nil)
- _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
+ _ credentialUnavailable = (*AuthenticationRequiredError)(nil)
+ _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil)
)
type credentialUnavailable interface {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
index 04ea962b4..6dd5b3d64 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
@@ -1,4 +1,4 @@
-go 1.18
+go 1.23.0
use (
.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
deleted file mode 100644
index c592f283b..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
+++ /dev/null
@@ -1,60 +0,0 @@
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
index 056785a8a..ec89de9b5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
@@ -20,22 +20,31 @@ const credNameBrowser = "InteractiveBrowserCredential"
type InteractiveBrowserCredentialOptions struct {
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
+ // ClientID is the ID of the application to which users will authenticate. When not set, users
+ // will authenticate to an Azure development application, which isn't recommended for production
+ // scenarios. In production, developers should instead register their applications and assign
+ // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
+ // information.
ClientID string
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+ // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
- disableAutomaticAuthentication bool
+ DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@@ -54,9 +63,6 @@ type InteractiveBrowserCredentialOptions struct {
// TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
// "organizations" tenant, which can authenticate work and school accounts.
TenantID string
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
func (o *InteractiveBrowserCredentialOptions) init() {
@@ -82,13 +88,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
LoginHint: cp.LoginHint,
- Record: cp.authenticationRecord,
+ Record: cp.AuthenticationRecord,
RedirectURL: cp.RedirectURL,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts)
if err != nil {
@@ -97,8 +103,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
return &InteractiveBrowserCredential{client: c}, nil
}
-// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+// Authenticate opens the default browser so a user can log in. Subsequent
+// GetToken calls will automatically use the returned AuthenticationRecord.
+func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go
new file mode 100644
index 000000000..c0cfe7606
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go
@@ -0,0 +1,86 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+ "sync"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+)
+
+// Cache represents a persistent cache that makes authentication data available across processes.
+// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
+// [persistent user authentication example] shows how to use a persistent cache to reuse user
+// logins across application runs. For service principal credential types such as
+// [ClientCertificateCredential], simply set the Cache field on the credential options.
+//
+// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
+type Cache struct {
+ // impl is a pointer so a Cache can carry persistent state across copies
+ impl *impl
+}
+
+// impl is a Cache's private implementation
+type impl struct {
+ // factory constructs storage implementations
+ factory func(bool) (cache.ExportReplace, error)
+ // cae and noCAE are previously constructed storage implementations. CAE
+ // and non-CAE tokens must be stored separately because MSAL's cache doesn't
+ // observe token claims. If a single storage implementation held both kinds
+ // of tokens, it could create a reauthentication or error loop by returning
+ // a non-CAE token lacking a required claim.
+ cae, noCAE cache.ExportReplace
+ // mu synchronizes around cae and noCAE
+ mu *sync.RWMutex
+}
+
+func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) {
+ if i == nil {
+ // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory
+ return nil, nil
+ }
+ var (
+ err error
+ xr cache.ExportReplace
+ )
+ i.mu.RLock()
+ xr = i.cae
+ if !cae {
+ xr = i.noCAE
+ }
+ i.mu.RUnlock()
+ if xr != nil {
+ return xr, nil
+ }
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ if cae {
+ if i.cae == nil {
+ if xr, err = i.factory(cae); err == nil {
+ i.cae = xr
+ }
+ }
+ return i.cae, err
+ }
+ if i.noCAE == nil {
+ if xr, err = i.factory(cae); err == nil {
+ i.noCAE = xr
+ }
+ }
+ return i.noCAE, err
+}
+
+// NewCache is the constructor for Cache. It takes a factory instead of an instance
+// because it doesn't know whether the Cache will store both CAE and non-CAE tokens.
+func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache {
+ return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}}
+}
+
+// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface.
+// It's a function instead of a method on Cache so packages in azidentity and
+// azidentity/cache can call it while applications can't. "cae" declares whether the
+// caller intends this implementation to store CAE tokens.
+func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) {
+ return c.impl.exportReplace(cae)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
deleted file mode 100644
index b1b4d5c8b..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-// TokenCachePersistenceOptions contains options for persistent token caching
-type TokenCachePersistenceOptions struct {
- // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text
- // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts
- // encryption before falling back to plaintext storage.
- AllowUnencryptedStorage bool
-
- // Name identifies the cache. Set this to isolate data from other applications.
- Name string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
deleted file mode 100644
index c1498b464..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-import (
- "errors"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
-)
-
-var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching")
-
-// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to
-// use a persistent cache must first import the cache module, which will replace this function
-// with a platform-specific implementation.
-var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) {
- if o == nil {
- return nil, nil
- }
- return nil, errMissingImport
-}
-
-// CacheFilePath returns the path to the cache file for the given name.
-// Defining it in this package makes it available to azidentity tests.
-var CacheFilePath = func(name string) (string, error) {
- return "", errMissingImport
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
index 1c3791777..063325c69 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
@@ -4,14 +4,13 @@
"Agent": {
"msi_image": {
"ArmTemplateParameters": "@{deployResources = $true}",
- "OSVmImage": "env:LINUXNEXTVMIMAGE",
+ "OSVmImage": "env:LINUXVMIMAGE",
"Pool": "env:LINUXPOOL"
}
},
"GoVersion": [
- "1.22.1"
- ],
- "IDENTITY_IMDS_AVAILABLE": "1"
+ "env:GO_VERSION_PREVIOUS"
+ ]
}
]
-}
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index 6122cc700..0735d1fcb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -8,24 +8,18 @@ package azidentity
import (
"context"
- "encoding/json"
"errors"
"fmt"
"net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
)
const (
@@ -41,65 +35,29 @@ const (
msiResID = "msi_res_id"
msiSecret = "MSI_SECRET"
imdsAPIVersion = "2018-02-01"
- azureArcAPIVersion = "2019-08-15"
+ azureArcAPIVersion = "2020-06-01"
qpClientID = "client_id"
serviceFabricAPIVersion = "2019-07-01-preview"
)
var imdsProbeTimeout = time.Second
-type msiType int
-
-const (
- msiTypeAppService msiType = iota
- msiTypeAzureArc
- msiTypeAzureML
- msiTypeCloudShell
- msiTypeIMDS
- msiTypeServiceFabric
-)
-
type managedIdentityClient struct {
- azClient *azcore.Client
- endpoint string
- id ManagedIDKind
- msiType msiType
- probeIMDS bool
-}
-
-// arcKeyDirectory returns the directory expected to contain Azure Arc keys
-var arcKeyDirectory = func() (string, error) {
- switch runtime.GOOS {
- case "linux":
- return "/var/opt/azcmagent/tokens", nil
- case "windows":
- pd := os.Getenv("ProgramData")
- if pd == "" {
- return "", errors.New("environment variable ProgramData has no value")
- }
- return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil
- default:
- return "", fmt.Errorf("unsupported OS %q", runtime.GOOS)
- }
-}
-
-type wrappedNumber json.Number
-
-func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
- c := string(b)
- if c == "\"\"" {
- return nil
- }
- return json.Unmarshal(b, (*json.Number)(n))
+ azClient *azcore.Client
+ imds, probeIMDS, userAssigned bool
+ // chained indicates whether the client is part of a credential chain. If true, the client will return
+ // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
+ chained bool
+ msalClient msalManagedIdentityClient
}
// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
func setIMDSRetryOptionDefaults(o *policy.RetryOptions) {
if o.MaxRetries == 0 {
- o.MaxRetries = 5
+ o.MaxRetries = 6
}
if o.MaxRetryDelay == 0 {
- o.MaxRetryDelay = 1 * time.Minute
+ o.MaxRetryDelay = 25 * time.Second
}
if o.RetryDelay == 0 {
o.RetryDelay = 2 * time.Second
@@ -138,39 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
options = &ManagedIdentityCredentialOptions{}
}
cp := options.ClientOptions
- c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
- env := "IMDS"
- if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
- if _, ok := os.LookupEnv(identityHeader); ok {
- if _, ok := os.LookupEnv(identityServerThumbprint); ok {
- env = "Service Fabric"
- c.endpoint = endpoint
- c.msiType = msiTypeServiceFabric
- } else {
- env = "App Service"
- c.endpoint = endpoint
- c.msiType = msiTypeAppService
- }
- } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
- env = "Azure Arc"
- c.endpoint = endpoint
- c.msiType = msiTypeAzureArc
- }
- } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
- c.endpoint = endpoint
- if _, ok := os.LookupEnv(msiSecret); ok {
- env = "Azure ML"
- c.msiType = msiTypeAzureML
- } else {
- env = "Cloud Shell"
- c.msiType = msiTypeCloudShell
- }
- } else {
+ c := managedIdentityClient{}
+ source, err := managedidentity.GetSource()
+ if err != nil {
+ return nil, err
+ }
+ env := string(source)
+ if source == managedidentity.DefaultToIMDS {
+ env = "IMDS"
+ c.imds = true
c.probeIMDS = options.dac
setIMDSRetryOptionDefaults(&cp.Retry)
}
- client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{
+ c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{
Tracing: azruntime.TracingOptions{
Namespace: traceNamespace,
},
@@ -178,39 +117,65 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
if err != nil {
return nil, err
}
- c.azClient = client
+
+ id := managedidentity.SystemAssigned()
+ if options.ID != nil {
+ c.userAssigned = true
+ switch s := options.ID.String(); options.ID.idKind() {
+ case miClientID:
+ id = managedidentity.UserAssignedClientID(s)
+ case miObjectID:
+ id = managedidentity.UserAssignedObjectID(s)
+ case miResourceID:
+ id = managedidentity.UserAssignedResourceID(s)
+ }
+ }
+ msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled())
+ if err != nil {
+ return nil, err
+ }
+ c.msalClient = &msalClient
if log.Should(EventAuthentication) {
- log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
+ msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env)
+ if options.ID != nil {
+ kind := "client"
+ switch options.ID.(type) {
+ case ObjectID:
+ kind = "object"
+ case ResourceID:
+ kind = "resource"
+ }
+ msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String())
+ }
+ log.Write(EventAuthentication, msg)
}
return &c, nil
}
-// provideToken acquires a token for MSAL's confidential.Client, which caches the token
-func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
- result := confidential.TokenProviderResult{}
- tk, err := c.authenticate(ctx, c.id, params.Scopes)
- if err == nil {
- result.AccessToken = tk.Token
- result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds())
- }
- return result, err
+func (*managedIdentityClient) CloseIdleConnections() {
+ // do nothing
+}
+
+func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) {
+ return doForClient(c.azClient, r)
}
// authenticate acquires an access token
-func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
+func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
// no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
// and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
if c.probeIMDS {
+ // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available
cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
defer cancel()
cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
- req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
- if err == nil {
- _, err = c.azClient.Pipeline().Do(req)
- }
+ req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint)
if err != nil {
+ return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
+ }
+ if _, err = c.azClient.Pipeline().Do(req); err != nil {
msg := err.Error()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information"
@@ -221,25 +186,27 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
c.probeIMDS = false
}
- msg, err := c.createAuthRequest(ctx, id, scopes)
- if err != nil {
- return azcore.AccessToken{}, err
- }
-
- resp, err := c.azClient.Pipeline().Do(msg)
- if err != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err)
- }
-
- if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
- return c.createAccessToken(resp)
+ ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims))
+ if err == nil {
+ msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", "))
+ log.Write(EventAuthentication, msg)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
-
- if c.msiType == msiTypeIMDS {
+ if c.imds {
+ var ije msalerrors.InvalidJsonErr
+ if c.chained && errors.As(err, &ije) {
+ // an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at
+ // the same address. Return a credentialUnavailableError so credential chains continue to their next credential
+ return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+ }
+ resp := getResponseFromError(err)
+ if resp == nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
+ }
switch resp.StatusCode {
case http.StatusBadRequest:
- if id != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil)
+ if c.userAssigned {
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
}
msg := "failed to authenticate a system assigned identity"
if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 {
@@ -255,247 +222,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
}
}
}
-
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil)
-}
-
-func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
- value := struct {
- // these are the only fields that we use
- Token string `json:"access_token,omitempty"`
- RefreshToken string `json:"refresh_token,omitempty"`
- ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
- ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
- }{}
- if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
- return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
- }
- if value.ExpiresIn != "" {
- expiresIn, err := json.Number(value.ExpiresIn).Int64()
- if err != nil {
- return azcore.AccessToken{}, err
- }
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
- }
- switch v := value.ExpiresOn.(type) {
- case float64:
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
- case string:
- if expiresOn, err := strconv.Atoi(v); err == nil {
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
- }
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil)
- default:
- msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil)
- }
-}
-
-func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- switch c.msiType {
- case msiTypeIMDS:
- return c.createIMDSAuthRequest(ctx, id, scopes)
- case msiTypeAppService:
- return c.createAppServiceAuthRequest(ctx, id, scopes)
- case msiTypeAzureArc:
- // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service
- key, err := c.getAzureArcSecretKey(ctx, scopes)
- if err != nil {
- msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err)
- return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
- }
- return c.createAzureArcAuthRequest(ctx, id, scopes, key)
- case msiTypeAzureML:
- return c.createAzureMLAuthRequest(ctx, id, scopes)
- case msiTypeServiceFabric:
- return c.createServiceFabricAuthRequest(ctx, id, scopes)
- case msiTypeCloudShell:
- return c.createCloudShellAuthRequest(ctx, id, scopes)
- default:
- return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
- }
-}
-
-func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", imdsAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(msiResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2019-08-01")
- q.Add("resource", scopes[0])
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("secret", os.Getenv(msiSecret))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2017-09-01")
- q.Add("resource", strings.Join(scopes, " "))
- q.Add("clientid", os.Getenv(defaultIdentityClientID))
- if id != nil {
- if id.idKind() == miResourceID {
- log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID")
- q.Set("clientid", "")
- q.Set(miResID, id.String())
- } else {
- q.Set("clientid", id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- q := request.Raw().URL.Query()
- request.Raw().Header.Set("Accept", "application/json")
- request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
- q.Add("api-version", serviceFabricAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
- // create the request to retreive the secret key challenge provided by the HIMDS service
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return "", err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- request.Raw().URL.RawQuery = q.Encode()
- // send the initial request to get the short-lived secret key
- response, err := c.azClient.Pipeline().Do(request)
- if err != nil {
- return "", err
- }
- // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
- // of the secret key file. Any other status code indicates an error in the request.
- if response.StatusCode != 401 {
- msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
- return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil)
- }
- header := response.Header.Get("WWW-Authenticate")
- if len(header) == 0 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil)
- }
- // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
- _, p, found := strings.Cut(header, "=")
- if !found {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil)
- }
- expected, err := arcKeyDirectory()
- if err != nil {
- return "", err
- }
- if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil)
- }
- f, err := os.Stat(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil)
- }
- if s := f.Size(); s > 4096 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil)
- }
- key, err := os.ReadFile(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil)
- }
- return string(key), nil
-}
-
-func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- data := url.Values{}
- data.Set("resource", strings.Join(scopes, " "))
- dataEncoded := data.Encode()
- body := streaming.NopCloser(strings.NewReader(dataEncoded))
- if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
- return nil, err
- }
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities")
- q := request.Raw().URL.Query()
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- return request, nil
+ err = newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
+ return azcore.AccessToken{}, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
index 13c043d8e..11b686ccd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
@@ -14,7 +14,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
const credNameManagedIdentity = "ManagedIdentityCredential"
@@ -22,8 +21,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential"
type managedIdentityIDKind int
const (
- miClientID managedIdentityIDKind = 0
- miResourceID managedIdentityIDKind = 1
+ miClientID managedIdentityIDKind = iota
+ miObjectID
+ miResourceID
)
// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID
@@ -32,7 +32,12 @@ type ManagedIDKind interface {
idKind() managedIdentityIDKind
}
-// ClientID is the client ID of a user-assigned managed identity.
+// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when a ClientID is specified on the following platforms:
+//
+// - Azure Arc
+// - Cloud Shell
+// - Service Fabric
type ClientID string
func (ClientID) idKind() managedIdentityIDKind {
@@ -44,7 +49,31 @@ func (c ClientID) String() string {
return string(c)
}
-// ResourceID is the resource ID of a user-assigned managed identity.
+// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when an ObjectID is specified on the following platforms:
+//
+// - Azure Arc
+// - Azure ML
+// - Cloud Shell
+// - Service Fabric
+type ObjectID string
+
+func (ObjectID) idKind() managedIdentityIDKind {
+ return miObjectID
+}
+
+// String returns the string value of the ID.
+func (o ObjectID) String() string {
+ return string(o)
+}
+
+// ResourceID is the resource ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when a ResourceID is specified on the following platforms:
+//
+// - Azure Arc
+// - Azure ML
+// - Cloud Shell
+// - Service Fabric
type ResourceID string
func (ResourceID) idKind() managedIdentityIDKind {
@@ -60,9 +89,10 @@ func (r ResourceID) String() string {
type ManagedIdentityCredentialOptions struct {
azcore.ClientOptions
- // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity
- // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that
- // some platforms don't accept resource IDs.
+ // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of
+ // the hosting environment's default. The value may be the identity's client, object, or resource ID.
+ // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed
+ // identities, or the specified kind of ID.
ID ManagedIDKind
// dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have
@@ -73,13 +103,13 @@ type ManagedIdentityCredentialOptions struct {
dac bool
}
-// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
+// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities.
// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
-// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities:
-// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
+// user-assigned identity.
+//
+// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
- client *confidentialClient
- mic *managedIdentityClient
+ mic *managedIdentityClient
}
// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
@@ -91,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
if err != nil {
return nil, err
}
- cred := confidential.NewCredFromTokenProvider(mic.provideToken)
-
- // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key.
- // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL.
- clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY"
- if options.ID != nil {
- clientID = options.ID.String()
- }
- // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
- c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
- ClientOptions: options.ClientOptions,
- })
- if err != nil {
- return nil, err
- }
- return &ManagedIdentityCredential{client: c, mic: mic}, nil
+ return &ManagedIdentityCredential{mic: mic}, nil
}
// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
if len(opts.Scopes) != 1 {
err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
return azcore.AccessToken{}, err
}
- // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
+ // managed identity endpoints require a v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
+ return c.mic.GetToken(ctx, opts)
}
var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
index b3d22dbf3..053d1785f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
@@ -30,12 +30,12 @@ type publicClientOptions struct {
azcore.ClientOptions
AdditionallyAllowedTenants []string
+ Cache Cache
DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
DisableAutomaticAuthentication bool
DisableInstanceDiscovery bool
LoginHint, RedirectURL string
- Record authenticationRecord
- TokenCachePersistenceOptions *tokenCachePersistenceOptions
+ Record AuthenticationRecord
Username, Password string
}
@@ -48,7 +48,7 @@ type publicClient struct {
host string
name string
opts publicClientOptions
- record authenticationRecord
+ record AuthenticationRecord
azClient *azcore.Client
}
@@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p
}, nil
}
-func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) {
+func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) {
if tro == nil {
tro = &policy.TokenRequestOptions{}
}
if len(tro.Scopes) == 0 {
if p.defaultScope == nil {
- return authenticationRecord{}, errScopeRequired
+ return AuthenticationRecord{}, errScopeRequired
}
tro.Scopes = p.defaultScope
}
client, mu, err := p.client(*tro)
if err != nil {
- return authenticationRecord{}, err
+ return AuthenticationRecord{}, err
}
mu.Lock()
defer mu.Unlock()
@@ -152,14 +152,9 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
return p.token(ar, err)
}
if p.opts.DisableAutomaticAuthentication {
- return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro)
+ return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro)
}
- at, err := p.reqToken(ctx, client, tro)
- if err == nil {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
- log.Write(EventAuthentication, msg)
- }
- return at, err
+ return p.reqToken(ctx, client, tro)
}
// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.
@@ -222,13 +217,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient,
}
func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
- cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE)
+ c, err := internal.ExportReplace(p.opts.Cache, enableCAE)
if err != nil {
return nil, err
}
o := []public.Option{
public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)),
- public.WithCache(cache),
+ public.WithCache(c),
public.WithHTTPClient(p),
}
if enableCAE {
@@ -242,12 +237,13 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
if err == nil {
+ msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", "))
+ log.Write(EventAuthentication, msg)
p.record, err = newAuthenticationRecord(ar)
} else {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(p.name, err.Error(), res, err)
+ err = newAuthenticationFailedErrorFromMSAL(p.name, err)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
index a69bbce34..c5634cd21 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
@@ -5,7 +5,27 @@
param (
[hashtable] $AdditionalParameters = @{},
- [hashtable] $DeploymentOutputs
+ [hashtable] $DeploymentOutputs,
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $SubscriptionId,
+
+ [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $TenantId,
+
+ [Parameter()]
+ [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')]
+ [string] $TestApplicationId,
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $Environment,
+
+ # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
+ [Parameter(ValueFromRemainingArguments = $true)]
+ $RemainingArguments
)
$ErrorActionPreference = 'Stop'
@@ -16,14 +36,15 @@ if ($CI) {
Write-Host "Skipping post-provisioning script because resources weren't deployed"
return
}
- az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID']
- az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
+ az cloud set -n $Environment
+ az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
+ az account set --subscription $SubscriptionId
}
-Write-Host "Building container"
+Write-Host "##[group]Building container"
$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test"
Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @"
-FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder
+FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder
ENV GOARCH=amd64 GOWORK=off
COPY . /azidentity
WORKDIR /azidentity/testdata/managed-id-test
@@ -41,39 +62,73 @@ CMD ["./managed-id-test"]
docker build -t $image "$PSScriptRoot"
az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME']
docker push $image
+Write-Host "##[endgroup]"
$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP']
+Write-Host "##[group]Deploying to VM"
+# az will return 0 when the script fails on the VM, so the script prints a UUID to indicate all commands succeeded
+$uuid = [guid]::NewGuid().ToString()
+$vmScript = @"
+az acr login -n $($DeploymentOutputs['AZIDENTITY_ACR_NAME']) && \
+sudo docker run \
+-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) \
+-e AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) \
+-e AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) \
+-p 80:8080 -d \
+$image && \
+/usr/bin/echo $uuid
+"@
+$output = az vm run-command invoke -g $rg -n $DeploymentOutputs['AZIDENTITY_VM_NAME'] --command-id RunShellScript --scripts "$vmScript" | Out-String
+Write-Host $output
+if (-not $output.Contains($uuid)) {
+ throw "couldn't start container on VM"
+}
+Write-Host "##[endgroup]"
+
# ACI is easier to provision here than in the bicep file because the image isn't available before now
-Write-Host "Deploying Azure Container Instance"
+Write-Host "##[group]Deploying Azure Container Instance"
$aciName = "azidentity-test"
az container create -g $rg -n $aciName --image $image `
--acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
+ --cpu 1 `
+ --ip-address Public `
+ --memory 1.0 `
+ --os-type Linux `
--role "Storage Blob Data Reader" `
--scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
- AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
- AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
- FUNCTIONS_CUSTOMHANDLER_PORT=80
-Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
+ AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) `
+ FUNCTIONS_CUSTOMHANDLER_PORT=80
+$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv
+Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP"
+Write-Host "##[endgroup]"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
-Write-Host "Deploying to Azure Functions"
+Write-Host "##[group]Deploying to Azure Functions"
$container = docker create $image
docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/"
docker rm -v $container
Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force
az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip
+Write-Host "##[endgroup]"
-Write-Host "Creating federated identity"
+Write-Host "##[group]Creating federated identity"
$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME']
$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME']
$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv
$podName = "azidentity-test"
$serviceAccountName = "workload-identity-sa"
-az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName
-Write-Host "Deploying to AKS"
+az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName --audiences api://AzureADTokenExchange
+Write-Host "##[endgroup]"
+
+Write-Host "##[group]Deploying to AKS"
az aks get-credentials -g $rg -n $aksName
az aks update --attach-acr $DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName
Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @"
@@ -110,3 +165,4 @@ spec:
"@
kubectl apply -f "$PSScriptRoot/k8s.yaml"
Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName"
+Write-Host "##[endgroup]"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
index 2a2165293..cb3b5f4df 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
@@ -19,7 +19,10 @@ param location string = resourceGroup().location
// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles
var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d')
-var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1')
+var blobReader = subscriptionResourceId(
+ 'Microsoft.Authorization/roleDefinitions',
+ '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1'
+)
resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) {
kind: 'StorageV2'
@@ -60,6 +63,16 @@ resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-
scope: containerRegistry
}
+resource acrPullVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
+ name: guid(resourceGroup().id, acrPull, 'vm')
+ properties: {
+ principalId: deployResources ? vm.identity.principalId : ''
+ principalType: 'ServicePrincipal'
+ roleDefinitionId: acrPull
+ }
+ scope: containerRegistry
+}
+
resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
scope: saUserAssigned
name: guid(resourceGroup().id, blobReader, usermgdid.id)
@@ -80,6 +93,16 @@ resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if
scope: sa
}
+resource blobRoleVM 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
+ scope: sa
+ name: guid(resourceGroup().id, blobReader, 'vm')
+ properties: {
+ principalId: deployResources ? vm.identity.principalId : ''
+ roleDefinitionId: blobReader
+ principalType: 'ServicePrincipal'
+ }
+}
+
resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) {
location: location
name: uniqueString(resourceGroup().id)
@@ -135,6 +158,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) {
name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY'
value: deployResources ? usermgdid.id : null
}
+ {
+ name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID'
+ value: deployResources ? usermgdid.properties.clientId : null
+ }
+ {
+ name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID'
+ value: deployResources ? usermgdid.properties.principalId : null
+ }
{
name: 'AzureWebJobsStorage'
value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}'
@@ -207,6 +238,143 @@ resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deplo
}
}
+resource publicIP 'Microsoft.Network/publicIPAddresses@2023-05-01' = if (deployResources) {
+ name: '${baseName}PublicIP'
+ location: location
+ sku: {
+ name: 'Standard'
+ }
+ properties: {
+ publicIPAllocationMethod: 'Static'
+ }
+}
+
+resource nsg 'Microsoft.Network/networkSecurityGroups@2024-07-01' = if (deployResources) {
+ name: '${baseName}NSG'
+ location: location
+ properties: {
+ securityRules: [
+ {
+ name: 'AllowHTTP'
+ properties: {
+ description: 'Allow HTTP traffic on port 80'
+ protocol: 'Tcp'
+ sourcePortRange: '*'
+ destinationPortRange: '80'
+ sourceAddressPrefix: '*'
+ destinationAddressPrefix: '*'
+ access: 'Allow'
+ priority: 1000
+ direction: 'Inbound'
+ }
+ }
+ ]
+ }
+}
+
+resource vnet 'Microsoft.Network/virtualNetworks@2024-07-01' = if (deployResources) {
+ name: '${baseName}vnet'
+ location: location
+ properties: {
+ addressSpace: {
+ addressPrefixes: [
+ '10.0.0.0/16'
+ ]
+ }
+ subnets: [
+ {
+ name: '${baseName}subnet'
+ properties: {
+ addressPrefix: '10.0.0.0/24'
+ defaultOutboundAccess: false
+ networkSecurityGroup: {
+ id: deployResources ? nsg.id : ''
+ }
+ }
+ }
+ ]
+ }
+}
+
+resource nic 'Microsoft.Network/networkInterfaces@2024-07-01' = if (deployResources) {
+ name: '${baseName}NIC'
+ location: location
+ properties: {
+ ipConfigurations: [
+ {
+ name: 'myIPConfig'
+ properties: {
+ privateIPAllocationMethod: 'Dynamic'
+ publicIPAddress: {
+ id: deployResources ? publicIP.id : ''
+ }
+ subnet: {
+ id: deployResources ? vnet.properties.subnets[0].id : ''
+ }
+ }
+ }
+ ]
+ }
+}
+
+resource vm 'Microsoft.Compute/virtualMachines@2024-07-01' = if (deployResources) {
+ name: '${baseName}vm'
+ location: location
+ identity: {
+ type: 'SystemAssigned, UserAssigned'
+ userAssignedIdentities: {
+ '${deployResources ? usermgdid.id: ''}': {}
+ }
+ }
+ properties: {
+ hardwareProfile: {
+ vmSize: 'Standard_DS1_v2'
+ }
+ osProfile: {
+ adminUsername: adminUser
+ computerName: '${baseName}vm'
+ customData: base64('''
+#cloud-config
+package_update: true
+packages:
+ - docker.io
+runcmd:
+ - curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+ - az login --identity --allow-no-subscriptions
+''')
+ linuxConfiguration: {
+ disablePasswordAuthentication: true
+ ssh: {
+ publicKeys: [
+ {
+ path: '/home/${adminUser}/.ssh/authorized_keys'
+ keyData: sshPubKey
+ }
+ ]
+ }
+ }
+ }
+ networkProfile: {
+ networkInterfaces: [
+ {
+ id: deployResources ? nic.id : ''
+ }
+ ]
+ }
+ storageProfile: {
+ imageReference: {
+ publisher: 'Canonical'
+ offer: 'ubuntu-24_04-lts'
+ sku: 'server'
+ version: 'latest'
+ }
+ osDisk: {
+ createOption: 'FromImage'
+ }
+ }
+ }
+}
+
output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : ''
output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : ''
output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : ''
@@ -217,3 +385,6 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs
output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
+output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : ''
+output AZIDENTITY_VM_NAME string = deployResources ? vm.name : ''
+output AZIDENTITY_VM_IP string = deployResources ? publicIP.properties.ipAddress : ''
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
index 294ed81e9..5791e7d22 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -17,6 +17,11 @@ import (
const credNameUserPassword = "UsernamePasswordCredential"
// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
+//
+// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor
+// authentication. See [Entra ID documentation] for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredentialOptions struct {
azcore.ClientOptions
@@ -25,24 +30,31 @@ type UsernamePasswordCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
-// with any form of multi-factor authentication, and the application must already have user or admin consent.
+// with any form of multifactor authentication, and the application must already have user or admin consent.
// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
+//
+// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation]
+// for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredential struct {
client *publicClient
}
@@ -54,13 +66,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
options = &UsernamePasswordCredentialOptions{}
}
opts := publicClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- Password: password,
- Record: options.authenticationRecord,
- TokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- Username: username,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ Password: password,
+ Record: options.AuthenticationRecord,
+ Username: username,
}
c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts)
if err != nil {
@@ -70,7 +82,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
}
// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 4305b5d3d..bb8bddb16 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
- version = "v1.7.0"
+ version = "v1.13.0"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
index 3e43e788e..6fecada2f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
@@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID.
ClientID string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
// TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID.
TenantID string
+
// TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the
// environment variable AZURE_FEDERATED_TOKEN_FILE.
TokenFilePath string
@@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (
w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}}
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
index 8ee66b526..779657b23 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
@@ -6,6 +6,8 @@
package errorinfo
+import "errors"
+
// NonRetriable represents a non-transient error. This works in
// conjunction with the retry policy, indicating that the error condition
// is idempotent, so no retries will be attempted.
@@ -15,10 +17,14 @@ type NonRetriable interface {
NonRetriable()
}
-// NonRetriableError marks the specified error as non-retriable.
-// This function takes an error as input and returns a new error that is marked as non-retriable.
+// NonRetriableError ensures the specified error is [NonRetriable]. If
+// the error is already [NonRetriable], it returns that error unchanged.
+// Otherwise, it returns a new, [NonRetriable] error.
func NonRetriableError(err error) error {
- return &nonRetriableError{err}
+ if !errors.As(err, new(NonRetriable)) {
+ err = &nonRetriableError{err}
+ }
+ return err
}
// nonRetriableError is a struct that embeds the error interface.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
index 4f1dcf1b7..76dadf7d3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
@@ -44,7 +44,7 @@ func Should(cls Event) bool {
if log.lst == nil {
return false
}
- if log.cls == nil || len(log.cls) == 0 {
+ if len(log.cls) == 0 {
return true
}
for _, c := range log.cls {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
index 238ef42ed..02aa1fb3b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
@@ -11,9 +11,17 @@ import (
"time"
)
+// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it.
+var backoff = func(now, lastAttempt time.Time) bool {
+ return lastAttempt.Add(30 * time.Second).After(now)
+}
+
// AcquireResource abstracts a method for refreshing a temporal resource.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
+// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration.
+type ShouldRefresh[TResource, TState any] func(TResource, TState) bool
+
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
@@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct {
// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
lastAttempt time.Time
+ // shouldRefresh indicates whether the resource should be refreshed before expiration
+ shouldRefresh ShouldRefresh[TResource, TState]
+
// acquireResource is the callback function that actually acquires the resource
acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
- return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
+ r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})}
+ r.shouldRefresh = r.expiringSoon
+ return r
+}
+
+// ResourceOptions contains optional configuration for Resource
+type ResourceOptions[TResource, TState any] struct {
+ // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite
+ // the currently held resource not having expired. [Resource.Get] ignores all errors from
+ // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh
+ // when the resource has expired (it unconditionally updates expired resources). When
+ // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5
+ // minutes.
+ ShouldRefresh ShouldRefresh[TResource, TState]
+}
+
+// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing.
+func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] {
+ r := NewResource(ar)
+ if opts.ShouldRefresh != nil {
+ r.shouldRefresh = opts.ShouldRefresh
+ }
+ return r
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
- // If the resource is expiring within this time window, update it eagerly.
- // This allows other threads/goroutines to keep running by using the not-yet-expired
- // resource value while one thread/goroutine updates the resource.
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
- const backoff = 30 * time.Second // Minimum wait time between eager update attempts
-
now, acquire, expired := time.Now(), false, false
// acquire exclusive lock
@@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
break
}
// Getting here means that this thread/goroutine will wait for the updated resource
- } else if er.expiration.Add(-window).Before(now) {
- // The resource is valid but is expiring within the time window
- if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
+ } else if er.shouldRefresh(resource, state) {
+ if !(er.acquiring || backoff(now, er.lastAttempt)) {
// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
// to do so within the last 30 seconds, this thread/goroutine will do it
er.acquiring, acquire = true, true
@@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() {
// Reset the expiration as if we never got this resource to begin with
er.expiration = time.Time{}
}
+
+func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool {
+ // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter
+ return er.expiration.Add(-5 * time.Minute).Before(time.Now())
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index f86286051..549d68ab9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -18,6 +18,8 @@ import (
"encoding/pem"
"errors"
"fmt"
+ "os"
+ "strings"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
@@ -63,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
// must contain the public certificate and the private key. If a PEM block is encrypted and
// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
@@ -303,7 +312,9 @@ func WithInstanceDiscovery(enabled bool) Option {
// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
func WithAzureRegion(val string) Option {
return func(o *clientOptions) {
- o.azureRegion = val
+ if val != "" {
+ o.azureRegion = val
+ }
}
}
@@ -315,16 +326,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client
if err != nil {
return Client{}, err
}
-
+ autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION")
opts := clientOptions{
authority: authority,
// if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
disableInstanceDiscovery: cred.tokenProvider != nil,
httpClient: shared.DefaultClient,
+ azureRegion: autoEnabledRegion,
}
for _, o := range options {
o(&opts)
}
+ if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") {
+ opts.azureRegion = ""
+ }
+
baseOpts := []base.Option{
base.WithCacheAccessor(opts.accessor),
base.WithClientCapabilities(opts.capabilities),
@@ -422,6 +438,7 @@ func WithClaims(claims string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -430,6 +447,7 @@ func WithClaims(claims string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -443,6 +461,8 @@ func WithClaims(claims string) interface {
t.claims = claims
case *acquireTokenOnBehalfOfOptions:
t.claims = claims
+ case *acquireTokenByUsernamePasswordOptions:
+ t.claims = claims
case *acquireTokenSilentOptions:
t.claims = claims
case *authCodeURLOptions:
@@ -489,6 +509,7 @@ func WithTenantID(tenantID string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -497,6 +518,7 @@ func WithTenantID(tenantID string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -510,6 +532,8 @@ func WithTenantID(tenantID string) interface {
t.tenantID = tenantID
case *acquireTokenOnBehalfOfOptions:
t.tenantID = tenantID
+ case *acquireTokenByUsernamePasswordOptions:
+ t.tenantID = tenantID
case *acquireTokenSilentOptions:
t.tenantID = tenantID
case *authCodeURLOptions:
@@ -585,6 +609,46 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts
return cca.base.AcquireTokenSilent(ctx, silentParameters)
}
+// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword
+type acquireTokenByUsernamePasswordOptions struct {
+ claims, tenantID string
+ authnScheme AuthenticationScheme
+}
+
+// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword
+type AcquireByUsernamePasswordOption interface {
+ acquireByUsernamePasswordOption()
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+//
+// Options: [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) {
+ o := acquireTokenByUsernamePasswordOptions{}
+ if err := options.ApplyOptions(&o, opts); err != nil {
+ return AuthResult{}, err
+ }
+ authParams, err := cca.base.AuthParams.WithTenant(o.tenantID)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATUsernamePassword
+ authParams.Claims = o.claims
+ authParams.Username = username
+ authParams.Password = password
+ if o.authnScheme != nil {
+ authParams.AuthnScheme = o.authnScheme
+ }
+
+ token, err := cca.base.Token.UsernamePassword(ctx, authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
+}
+
// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
type acquireTokenByAuthCodeOptions struct {
challenge, claims, tenantID string
@@ -676,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
if err != nil {
return AuthResult{}, err
}
- return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
index c9b8dbed0..b5cbb5721 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -64,11 +64,20 @@ type CallErr struct {
Err error
}
+type InvalidJsonErr struct {
+ Err error
+}
+
// Errors implements error.Error().
func (e CallErr) Error() string {
return e.Err.Error()
}
+// Errors implements error.Error().
+func (e InvalidJsonErr) Error() string {
+ return e.Err.Error()
+}
+
// Verbose prints a versbose error message with the request or response.
func (e CallErr) Verbose() string {
e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 09a0d92f5..61c1c4cec 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -5,16 +5,17 @@ package base
import (
"context"
- "errors"
"fmt"
"net/url"
"reflect"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
@@ -89,14 +90,28 @@ type AuthResult struct {
ExpiresOn time.Time
GrantedScopes []string
DeclinedScopes []string
+ Metadata AuthResultMetadata
}
+// AuthResultMetadata which contains meta data for the AuthResult
+type AuthResultMetadata struct {
+ RefreshOn time.Time
+ TokenSource TokenSource
+}
+
+type TokenSource int
+
+// These are all the types of token flows.
+const (
+ TokenSourceIdentityProvider TokenSource = 0
+ TokenSourceCache TokenSource = 1
+)
+
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
if err := storageTokenResponse.AccessToken.Validate(); err != nil {
return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
}
-
account := storageTokenResponse.Account
accessToken := storageTokenResponse.AccessToken.Secret
grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
@@ -109,7 +124,18 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
}
}
- return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+ return AuthResult{
+ Account: account,
+ IDToken: idToken,
+ AccessToken: accessToken,
+ ExpiresOn: storageTokenResponse.AccessToken.ExpiresOn.T,
+ GrantedScopes: grantedScopes,
+ DeclinedScopes: nil,
+ Metadata: AuthResultMetadata{
+ TokenSource: TokenSourceCache,
+ RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T,
+ },
+ }, nil
}
// NewAuthResult creates an AuthResult.
@@ -121,8 +147,12 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
Account: account,
IDToken: tokenResponse.IDToken,
AccessToken: tokenResponse.AccessToken,
- ExpiresOn: tokenResponse.ExpiresOn.T,
+ ExpiresOn: tokenResponse.ExpiresOn,
GrantedScopes: tokenResponse.GrantedScopes.Slice,
+ Metadata: AuthResultMetadata{
+ TokenSource: TokenSourceIdentityProvider,
+ RefreshOn: tokenResponse.RefreshOn.T,
+ },
}, nil
}
@@ -137,6 +167,8 @@ type Client struct {
AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
cacheAccessor cache.ExportReplace
cacheAccessorMu *sync.RWMutex
+ canRefresh map[string]*atomic.Value
+ canRefreshMu *sync.Mutex
}
// Option is an optional argument to the New constructor.
@@ -213,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
cacheAccessorMu: &sync.RWMutex{},
manager: storage.New(token),
pmanager: storage.NewPartitionedManager(token),
+ canRefresh: make(map[string]*atomic.Value),
+ canRefreshMu: &sync.Mutex{},
}
for _, o := range options {
if err = o(&client); err != nil {
@@ -317,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if silent.Claims == "" {
ar, err = AuthResultFromStorage(storageTokenResponse)
if err == nil {
+ if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
+ b.canRefreshMu.Lock()
+ refreshValue, ok := b.canRefresh[tenant]
+ if !ok {
+ refreshValue = &atomic.Value{}
+ refreshValue.Store(false)
+ b.canRefresh[tenant] = refreshValue
+ }
+ b.canRefreshMu.Unlock()
+ if refreshValue.CompareAndSwap(false, true) {
+ defer refreshValue.Store(false)
+ // Added a check to see if the token is still same because there is a chance
+ // that the token is already refreshed by another thread.
+ // If the token is not same, we don't need to refresh it.
+ // Which means it refreshed.
+ if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
+ if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
+ return b.AuthResultFromToken(ctx, authParams, tr)
+ }
+ }
+ }
+ }
ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
@@ -334,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if err != nil {
return ar, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
@@ -363,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
return AuthResult{}, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
@@ -392,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
authParams.UserAssertion = onBehalfOfParams.UserAssertion
token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
if err == nil {
- ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
+ ar, err = b.AuthResultFromToken(ctx, authParams, token)
}
return ar, err
}
-func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
- if !cacheWrite {
- return NewAuthResult(token, shared.Account{})
- }
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
var m manager = b.manager
if authParams.AuthorizationType == authority.ATOnBehalfOf {
m = b.pmanager
@@ -430,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
return ar, err
}
+// This function wraps time.Now() and is used for refreshing the application
+// was created to test the function against refreshin
+var Now = time.Now
+
func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
if b.cacheAccessor != nil {
b.cacheAccessorMu.RLock()
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
deleted file mode 100644
index 2221e60c4..000000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package storage holds all cached token information for MSAL. This storage can be
-// augmented with third-party extensions to provide persistent storage. In that case,
-// reads and writes in upper packages will call Marshal() to take the entire in-memory
-// representation and write it to storage and Unmarshal() to update the entire in-memory
-// storage with what was in the persistent storage. The persistent storage can only be
-// accessed in this way because multiple MSAL clients written in multiple languages can
-// access the same storage and must adhere to the same method that was defined
-// previously.
-package storage
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// aadInstanceDiscoveryer allows faking in tests.
-// It is implemented in production by ops/authority.Client
-type aadInstanceDiscoveryer interface {
- AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
-}
-
-// TokenResponse mimics a token response that was pulled from the cache.
-type TokenResponse struct {
- RefreshToken accesstokens.RefreshToken
- IDToken IDToken // *Credential
- AccessToken AccessToken
- Account shared.Account
-}
-
-// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
-// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
-// was given to it on each call.
-type Manager struct {
- contract *Contract
- contractMu sync.RWMutex
- requests aadInstanceDiscoveryer // *oauth.Token
-
- aadCacheMu sync.RWMutex
- aadCache map[string]authority.InstanceDiscoveryMetadata
-}
-
-// New is the constructor for Manager.
-func New(requests *oauth.Client) *Manager {
- m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
- m.contract = NewContract()
- return m
-}
-
-func checkAlias(alias string, aliases []string) bool {
- for _, v := range aliases {
- if alias == v {
- return true
- }
- }
- return false
-}
-
-func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
- newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
- scopeCounter := 0
- for _, scope := range scopesOne {
- for _, otherScope := range newScopesTwo {
- if strings.EqualFold(scope, otherScope) {
- scopeCounter++
- continue
- }
- }
- }
- return scopeCounter == len(scopesOne)
-}
-
-// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
-// it contains an uppercase character (v1.1+ keys are all lowercase)
-func needsUpgrade(key string) bool {
- for _, r := range key {
- if 'A' <= r && r <= 'Z' {
- return true
- }
- }
- return false
-}
-
-// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
-// the v1.0 item. Callers must hold an exclusive lock on m.
-func upgrade[T any](m map[string]T, k string) T {
- v1_1Key := strings.ToLower(k)
- v, ok := m[k]
- if !ok {
- // another goroutine did the upgrade while this one was waiting for the write lock
- return m[v1_1Key]
- }
- if v2, ok := m[v1_1Key]; ok {
- // cache has an equivalent v1.1+ item, which we prefer because we know it was added
- // by a newer version of the module and is therefore more likely to remain valid.
- // The v1.0 item may have expired because only v1.0 or earlier would update it.
- v = v2
- } else {
- // add an equivalent item according to the v1.1 schema
- m[v1_1Key] = v
- }
- delete(m, k)
- return v
-}
-
-// Read reads a storage token from the cache if it exists.
-func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
- tr := TokenResponse{}
- homeAccountID := authParameters.HomeAccountID
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- scopes := authParameters.Scopes
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
- tokenType := authParameters.AuthnScheme.AccessTokenType()
-
- // fetch metadata if instanceDiscovery is enabled
- aliases := []string{authParameters.AuthorityInfo.Host}
- if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
- metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
- if err != nil {
- return TokenResponse{}, err
- }
- aliases = metadata.Aliases
- }
-
- accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
- tr.AccessToken = accessToken
-
- if homeAccountID == "" {
- // caller didn't specify a user, so there's no reason to search for an ID or refresh token
- return tr, nil
- }
- // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
- // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
- idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
- if err == nil {
- tr.IDToken = idToken
- }
-
- if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
- // we need the family ID to identify the correct refresh token, if any
- familyID := appMetadata.FamilyID
- refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
- if err == nil {
- tr.RefreshToken = refreshToken
- }
- }
-
- account, err := m.readAccount(homeAccountID, aliases, realm)
- if err == nil {
- tr.Account = account
- }
- return tr, nil
-}
-
-const scopeSeparator = " "
-
-// Write writes a token response to the cache and returns the account information the token is stored with.
-func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
- homeAccountID := tokenResponse.HomeAccountID()
- environment := authParameters.AuthorityInfo.Host
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
- cachedAt := time.Now()
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
-
- var account shared.Account
-
- if len(tokenResponse.RefreshToken) > 0 {
- refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
- if err := m.writeRefreshToken(refreshToken); err != nil {
- return account, err
- }
- }
-
- if len(tokenResponse.AccessToken) > 0 {
- accessToken := NewAccessToken(
- homeAccountID,
- environment,
- realm,
- clientID,
- cachedAt,
- tokenResponse.ExpiresOn.T,
- tokenResponse.ExtExpiresOn.T,
- target,
- tokenResponse.AccessToken,
- tokenResponse.TokenType,
- authnSchemeKeyID,
- )
-
- // Since we have a valid access token, cache it before moving on.
- if err := accessToken.Validate(); err == nil {
- if err := m.writeAccessToken(accessToken); err != nil {
- return account, err
- }
- }
- }
-
- idTokenJwt := tokenResponse.IDToken
- if !idTokenJwt.IsZero() {
- idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
- if err := m.writeIDToken(idToken); err != nil {
- return shared.Account{}, err
- }
-
- localAccountID := idTokenJwt.LocalAccountID()
- authorityType := authParameters.AuthorityInfo.AuthorityType
-
- preferredUsername := idTokenJwt.UPN
- if idTokenJwt.PreferredUsername != "" {
- preferredUsername = idTokenJwt.PreferredUsername
- }
-
- account = shared.NewAccount(
- homeAccountID,
- environment,
- realm,
- localAccountID,
- authorityType,
- preferredUsername,
- )
- if err := m.writeAccount(account); err != nil {
- return shared.Account{}, err
- }
- }
-
- AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
-
- if err := m.writeAppMetaData(AppMetaData); err != nil {
- return shared.Account{}, err
- }
- return account, nil
-}
-
-func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- md, err := m.aadMetadataFromCache(ctx, authorityInfo)
- if err != nil {
- // not in the cache, retrieve it
- md, err = m.aadMetadata(ctx, authorityInfo)
- }
- return md, err
-}
-
-func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.RLock()
- defer m.aadCacheMu.RUnlock()
- metadata, ok := m.aadCache[authorityInfo.Host]
- if ok {
- return metadata, nil
- }
- return metadata, errors.New("not found")
-}
-
-func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.Lock()
- defer m.aadCacheMu.Unlock()
- discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return authority.InstanceDiscoveryMetadata{}, err
- }
-
- for _, metadataEntry := range discoveryResponse.Metadata {
- for _, aliasedAuthority := range metadataEntry.Aliases {
- m.aadCache[aliasedAuthority] = metadataEntry
- }
- }
- if _, ok := m.aadCache[authorityInfo.Host]; !ok {
- m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
- PreferredNetwork: authorityInfo.Host,
- PreferredCache: authorityInfo.Host,
- }
- }
- return m.aadCache[authorityInfo.Host], nil
-}
-
-func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
- m.contractMu.RLock()
- // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
- // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
- // an issue, however if it does become a problem then we know where to look.
- for k, at := range m.contract.AccessTokens {
- if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
- if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
- if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- at = upgrade(m.contract.AccessTokens, k)
- }
- return at
- }
- }
- }
- }
- m.contractMu.RUnlock()
- return AccessToken{}
-}
-
-func (m *Manager) writeAccessToken(accessToken AccessToken) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- key := accessToken.Key()
- m.contract.AccessTokens[key] = accessToken
- return nil
-}
-
-func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
- byFamily := func(rt accesstokens.RefreshToken) bool {
- return matchFamilyRefreshToken(rt, homeID, envAliases)
- }
- byClient := func(rt accesstokens.RefreshToken) bool {
- return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
- }
-
- var matchers []func(rt accesstokens.RefreshToken) bool
- if familyID == "" {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byClient, byFamily,
- }
- } else {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byFamily, byClient,
- }
- }
-
- // TODO(keegan): All the tests here pass, but Bogdan says this is
- // more complicated. I'm opening an issue for this to have him
- // review the tests and suggest tests that would break this so
- // we can re-write against good tests. His comments as follow:
- // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
- // The algorithm is:
- // If application is NOT part of the family, search by client_ID
- // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
- // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
- m.contractMu.RLock()
- for _, matcher := range matchers {
- for k, rt := range m.contract.RefreshTokens {
- if matcher(rt) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- rt = upgrade(m.contract.RefreshTokens, k)
- }
- return rt, nil
- }
- }
- }
-
- m.contractMu.RUnlock()
- return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
-}
-
-func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
-}
-
-func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
-}
-
-func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
- key := refreshToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.RefreshTokens[key] = refreshToken
- return nil
-}
-
-func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
- m.contractMu.RLock()
- for k, idt := range m.contract.IDTokens {
- if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
- if checkAlias(idt.Environment, envAliases) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- idt = upgrade(m.contract.IDTokens, k)
- }
- return idt, nil
- }
- }
- }
- m.contractMu.RUnlock()
- return IDToken{}, fmt.Errorf("token not found")
-}
-
-func (m *Manager) writeIDToken(idToken IDToken) error {
- key := idToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.IDTokens[key] = idToken
- return nil
-}
-
-func (m *Manager) AllAccounts() []shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- var accounts []shared.Account
- for _, v := range m.contract.Accounts {
- accounts = append(accounts, v)
- }
-
- return accounts
-}
-
-func (m *Manager) Account(homeAccountID string) shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- for _, v := range m.contract.Accounts {
- if v.HomeAccountID == homeAccountID {
- return v
- }
- }
-
- return shared.Account{}
-}
-
-func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
- m.contractMu.RLock()
-
- // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
- // We only use a map because the storage contract shared between all language implementations says use a map.
- // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
- // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
- // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
- // is really low (say 2). Each hash is more expensive than the entire iteration.
- for k, acc := range m.contract.Accounts {
- if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- acc = upgrade(m.contract.Accounts, k)
- }
- return acc, nil
- }
- }
- m.contractMu.RUnlock()
- return shared.Account{}, fmt.Errorf("account not found")
-}
-
-func (m *Manager) writeAccount(account shared.Account) error {
- key := account.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.Accounts[key] = account
- return nil
-}
-
-func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
- m.contractMu.RLock()
- for k, app := range m.contract.AppMetaData {
- if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- app = upgrade(m.contract.AppMetaData, k)
- }
- return app, nil
- }
- }
- m.contractMu.RUnlock()
- return AppMetaData{}, fmt.Errorf("not found")
-}
-
-func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
- key := AppMetaData.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.AppMetaData[key] = AppMetaData
- return nil
-}
-
-// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
-func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
- m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
- m.removeAccessTokens(account.HomeAccountID, account.Environment)
- m.removeIDTokens(account.HomeAccountID, account.Environment)
- m.removeAccounts(account.HomeAccountID, account.Environment)
-}
-
-func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, rt := range m.contract.RefreshTokens {
- // Check for RTs associated with the account.
- if rt.HomeAccountID == homeID && rt.Environment == env {
- // Do RT's app ownership check as a precaution, in case family apps
- // and 3rd-party apps share same token cache, although they should not.
- if rt.ClientID == clientID || rt.FamilyID != "" {
- delete(m.contract.RefreshTokens, key)
- }
- }
- }
-}
-
-func (m *Manager) removeAccessTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, at := range m.contract.AccessTokens {
- // Remove AT's associated with the account
- if at.HomeAccountID == homeID && at.Environment == env {
- // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
- // It means ATs for other apps will also be removed, it is OK because:
- // non-family apps are not supposed to share token cache to begin with;
- // Even if it happens, we keep other app's RT already, so SSO still works.
- delete(m.contract.AccessTokens, key)
- }
- }
-}
-
-func (m *Manager) removeIDTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, idt := range m.contract.IDTokens {
- // Remove ID tokens associated with the account.
- if idt.HomeAccountID == homeID && idt.Environment == env {
- delete(m.contract.IDTokens, key)
- }
- }
-}
-
-func (m *Manager) removeAccounts(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, acc := range m.contract.Accounts {
- // Remove the specified account.
- if acc.HomeAccountID == homeID && acc.Environment == env {
- delete(m.contract.Accounts, key)
- }
- }
-}
-
-// update updates the internal cache object. This is for use in tests, other uses are not
-// supported.
-func (m *Manager) update(cache *Contract) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract = cache
-}
-
-// Marshal implements cache.Marshaler.
-func (m *Manager) Marshal() ([]byte, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- return json.Marshal(m.contract)
-}
-
-// Unmarshal implements cache.Unmarshaler.
-func (m *Manager) Unmarshal(b []byte) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
-
- contract := NewContract()
-
- err := json.Unmarshal(b, contract)
- if err != nil {
- return err
- }
-
- m.contract = contract
-
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
similarity index 95%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
index f9be90276..7379e2233 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
@@ -72,6 +72,7 @@ type AccessToken struct {
ClientID string `json:"client_id,omitempty"`
Secret string `json:"secret,omitempty"`
Scopes string `json:"target,omitempty"`
+ RefreshOn internalTime.Unix `json:"refresh_on,omitempty"`
ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
@@ -83,7 +84,7 @@ type AccessToken struct {
}
// NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
return AccessToken{
HomeAccountID: homeID,
Environment: env,
@@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
Secret: token,
Scopes: scopes,
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
+ RefreshOn: internalTime.Unix{T: refreshOn.UTC()},
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
TokenType: tokenType,
@@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
// Key outputs the key that can be used to uniquely look up this entry in a map.
func (a AccessToken) Key() string {
+ ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}
key := strings.Join(
- []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
+ ks,
shared.CacheKeySeparator,
)
// add token type to key for new access tokens types. skip for bearer token type to
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
similarity index 99%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
index c09318330..ff07d4b5a 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
@@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
realm,
clientID,
cachedAt,
- tokenResponse.ExpiresOn.T,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
new file mode 100644
index 000000000..84a234967
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
@@ -0,0 +1,589 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package storage holds all cached token information for MSAL. This storage can be
+// augmented with third-party extensions to provide persistent storage. In that case,
+// reads and writes in upper packages will call Marshal() to take the entire in-memory
+// representation and write it to storage and Unmarshal() to update the entire in-memory
+// storage with what was in the persistent storage. The persistent storage can only be
+// accessed in this way because multiple MSAL clients written in multiple languages can
+// access the same storage and must adhere to the same method that was defined
+// previously.
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// aadInstanceDiscoveryer allows faking in tests.
+// It is implemented in production by ops/authority.Client
+type aadInstanceDiscoveryer interface {
+ AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
+}
+
+// TokenResponse mimics a token response that was pulled from the cache.
+type TokenResponse struct {
+ RefreshToken accesstokens.RefreshToken
+ IDToken IDToken // *Credential
+ AccessToken AccessToken
+ Account shared.Account
+}
+
+// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
+// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
+// was given to it on each call.
+type Manager struct {
+ contract *Contract
+ contractMu sync.RWMutex
+ requests aadInstanceDiscoveryer // *oauth.Token
+
+ aadCacheMu sync.RWMutex
+ aadCache map[string]authority.InstanceDiscoveryMetadata
+}
+
+// New is the constructor for Manager.
+func New(requests *oauth.Client) *Manager {
+ m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
+ m.contract = NewContract()
+ return m
+}
+
+func checkAlias(alias string, aliases []string) bool {
+ for _, v := range aliases {
+ if alias == v {
+ return true
+ }
+ }
+ return false
+}
+
+func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
+ newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
+ scopeCounter := 0
+ for _, scope := range scopesOne {
+ for _, otherScope := range newScopesTwo {
+ if strings.EqualFold(scope, otherScope) {
+ scopeCounter++
+ continue
+ }
+ }
+ }
+ return scopeCounter == len(scopesOne)
+}
+
+// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
+// it contains an uppercase character (v1.1+ keys are all lowercase)
+func needsUpgrade(key string) bool {
+ for _, r := range key {
+ if 'A' <= r && r <= 'Z' {
+ return true
+ }
+ }
+ return false
+}
+
+// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
+// the v1.0 item. Callers must hold an exclusive lock on m.
+func upgrade[T any](m map[string]T, k string) T {
+ v1_1Key := strings.ToLower(k)
+ v, ok := m[k]
+ if !ok {
+ // another goroutine did the upgrade while this one was waiting for the write lock
+ return m[v1_1Key]
+ }
+ if v2, ok := m[v1_1Key]; ok {
+ // cache has an equivalent v1.1+ item, which we prefer because we know it was added
+ // by a newer version of the module and is therefore more likely to remain valid.
+ // The v1.0 item may have expired because only v1.0 or earlier would update it.
+ v = v2
+ } else {
+ // add an equivalent item according to the v1.1 schema
+ m[v1_1Key] = v
+ }
+ delete(m, k)
+ return v
+}
+
+// Read reads a storage token from the cache if it exists.
+func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
+ tr := TokenResponse{}
+ homeAccountID := authParameters.HomeAccountID
+ realm := authParameters.AuthorityInfo.Tenant
+ clientID := authParameters.ClientID
+ scopes := authParameters.Scopes
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+ tokenType := authParameters.AuthnScheme.AccessTokenType()
+
+ // fetch metadata if instanceDiscovery is enabled
+ aliases := []string{authParameters.AuthorityInfo.Host}
+ if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
+ metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
+ if err != nil {
+ return TokenResponse{}, err
+ }
+ aliases = metadata.Aliases
+ }
+
+ accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
+ tr.AccessToken = accessToken
+
+ if homeAccountID == "" {
+ // caller didn't specify a user, so there's no reason to search for an ID or refresh token
+ return tr, nil
+ }
+ // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
+ // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
+ idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
+ if err == nil {
+ tr.IDToken = idToken
+ }
+
+ if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
+ // we need the family ID to identify the correct refresh token, if any
+ familyID := appMetadata.FamilyID
+ refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
+ if err == nil {
+ tr.RefreshToken = refreshToken
+ }
+ }
+
+ account, err := m.readAccount(homeAccountID, aliases, realm)
+ if err == nil {
+ tr.Account = account
+ }
+ return tr, nil
+}
+
+const scopeSeparator = " "
+
+// Write writes a token response to the cache and returns the account information the token is stored with.
+func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
+ homeAccountID := tokenResponse.HomeAccountID()
+ environment := authParameters.AuthorityInfo.Host
+ realm := authParameters.AuthorityInfo.Tenant
+ clientID := authParameters.ClientID
+
+ target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
+ cachedAt := time.Now()
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+
+ var account shared.Account
+
+ if len(tokenResponse.RefreshToken) > 0 {
+ refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
+ if err := m.writeRefreshToken(refreshToken); err != nil {
+ return account, err
+ }
+ }
+
+ if len(tokenResponse.AccessToken) > 0 {
+ accessToken := NewAccessToken(
+ homeAccountID,
+ environment,
+ realm,
+ clientID,
+ cachedAt,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
+ tokenResponse.ExtExpiresOn.T,
+ target,
+ tokenResponse.AccessToken,
+ tokenResponse.TokenType,
+ authnSchemeKeyID,
+ )
+
+ // Since we have a valid access token, cache it before moving on.
+ if err := accessToken.Validate(); err == nil {
+ if err := m.writeAccessToken(accessToken); err != nil {
+ return account, err
+ }
+ }
+ }
+
+ idTokenJwt := tokenResponse.IDToken
+ if !idTokenJwt.IsZero() {
+ idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
+ if err := m.writeIDToken(idToken); err != nil {
+ return shared.Account{}, err
+ }
+
+ localAccountID := idTokenJwt.LocalAccountID()
+ authorityType := authParameters.AuthorityInfo.AuthorityType
+
+ preferredUsername := idTokenJwt.UPN
+ if idTokenJwt.PreferredUsername != "" {
+ preferredUsername = idTokenJwt.PreferredUsername
+ }
+
+ account = shared.NewAccount(
+ homeAccountID,
+ environment,
+ realm,
+ localAccountID,
+ authorityType,
+ preferredUsername,
+ )
+ if err := m.writeAccount(account); err != nil {
+ return shared.Account{}, err
+ }
+ }
+
+ AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
+
+ if err := m.writeAppMetaData(AppMetaData); err != nil {
+ return shared.Account{}, err
+ }
+ return account, nil
+}
+
+func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ md, err := m.aadMetadataFromCache(ctx, authorityInfo)
+ if err != nil {
+ // not in the cache, retrieve it
+ md, err = m.aadMetadata(ctx, authorityInfo)
+ }
+ return md, err
+}
+
+func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ m.aadCacheMu.RLock()
+ defer m.aadCacheMu.RUnlock()
+ metadata, ok := m.aadCache[authorityInfo.Host]
+ if ok {
+ return metadata, nil
+ }
+ return metadata, errors.New("not found")
+}
+
+func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ if m.requests == nil {
+ return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("httpclient in oauth instance for fetching metadata is nil")
+ }
+ m.aadCacheMu.Lock()
+ defer m.aadCacheMu.Unlock()
+ discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
+ if err != nil {
+ return authority.InstanceDiscoveryMetadata{}, err
+ }
+
+ for _, metadataEntry := range discoveryResponse.Metadata {
+ for _, aliasedAuthority := range metadataEntry.Aliases {
+ m.aadCache[aliasedAuthority] = metadataEntry
+ }
+ }
+ if _, ok := m.aadCache[authorityInfo.Host]; !ok {
+ m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
+ PreferredNetwork: authorityInfo.Host,
+ PreferredCache: authorityInfo.Host,
+ }
+ }
+ return m.aadCache[authorityInfo.Host], nil
+}
+
+func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
+ m.contractMu.RLock()
+ // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
+ // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
+ // an issue, however if it does become a problem then we know where to look.
+ for k, at := range m.contract.AccessTokens {
+ if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
+ if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
+ if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ at = upgrade(m.contract.AccessTokens, k)
+ }
+ return at
+ }
+ }
+ }
+ }
+ m.contractMu.RUnlock()
+ return AccessToken{}
+}
+
+func (m *Manager) writeAccessToken(accessToken AccessToken) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ key := accessToken.Key()
+ m.contract.AccessTokens[key] = accessToken
+ return nil
+}
+
+func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
+ byFamily := func(rt accesstokens.RefreshToken) bool {
+ return matchFamilyRefreshToken(rt, homeID, envAliases)
+ }
+ byClient := func(rt accesstokens.RefreshToken) bool {
+ return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
+ }
+
+ var matchers []func(rt accesstokens.RefreshToken) bool
+ if familyID == "" {
+ matchers = []func(rt accesstokens.RefreshToken) bool{
+ byClient, byFamily,
+ }
+ } else {
+ matchers = []func(rt accesstokens.RefreshToken) bool{
+ byFamily, byClient,
+ }
+ }
+
+ // TODO(keegan): All the tests here pass, but Bogdan says this is
+ // more complicated. I'm opening an issue for this to have him
+ // review the tests and suggest tests that would break this so
+ // we can re-write against good tests. His comments as follow:
+ // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
+ // The algorithm is:
+ // If application is NOT part of the family, search by client_ID
+ // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+ m.contractMu.RLock()
+ for _, matcher := range matchers {
+ for k, rt := range m.contract.RefreshTokens {
+ if matcher(rt) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ rt = upgrade(m.contract.RefreshTokens, k)
+ }
+ return rt, nil
+ }
+ }
+ }
+
+ m.contractMu.RUnlock()
+ return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
+ return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
+}
+
+func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
+ return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
+}
+
+func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
+ key := refreshToken.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.RefreshTokens[key] = refreshToken
+ return nil
+}
+
+func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
+ m.contractMu.RLock()
+ for k, idt := range m.contract.IDTokens {
+ if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
+ if checkAlias(idt.Environment, envAliases) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ idt = upgrade(m.contract.IDTokens, k)
+ }
+ return idt, nil
+ }
+ }
+ }
+ m.contractMu.RUnlock()
+ return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *Manager) writeIDToken(idToken IDToken) error {
+ key := idToken.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.IDTokens[key] = idToken
+ return nil
+}
+
+func (m *Manager) AllAccounts() []shared.Account {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+
+ var accounts []shared.Account
+ for _, v := range m.contract.Accounts {
+ accounts = append(accounts, v)
+ }
+
+ return accounts
+}
+
+func (m *Manager) Account(homeAccountID string) shared.Account {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+
+ for _, v := range m.contract.Accounts {
+ if v.HomeAccountID == homeAccountID {
+ return v
+ }
+ }
+
+ return shared.Account{}
+}
+
+func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
+ m.contractMu.RLock()
+
+ // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+ // We only use a map because the storage contract shared between all language implementations says use a map.
+ // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
+ // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
+ // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
+ // is really low (say 2). Each hash is more expensive than the entire iteration.
+ for k, acc := range m.contract.Accounts {
+ if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ acc = upgrade(m.contract.Accounts, k)
+ }
+ return acc, nil
+ }
+ }
+ m.contractMu.RUnlock()
+ return shared.Account{}, fmt.Errorf("account not found")
+}
+
+func (m *Manager) writeAccount(account shared.Account) error {
+ key := account.Key()
+
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.Accounts[key] = account
+ return nil
+}
+
+func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
+ m.contractMu.RLock()
+ for k, app := range m.contract.AppMetaData {
+ if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ app = upgrade(m.contract.AppMetaData, k)
+ }
+ return app, nil
+ }
+ }
+ m.contractMu.RUnlock()
+ return AppMetaData{}, fmt.Errorf("not found")
+}
+
+func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
+ key := AppMetaData.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.AppMetaData[key] = AppMetaData
+ return nil
+}
+
+// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
+func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
+ m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
+ m.removeAccessTokens(account.HomeAccountID, account.Environment)
+ m.removeIDTokens(account.HomeAccountID, account.Environment)
+ m.removeAccounts(account.HomeAccountID, account.Environment)
+}
+
+func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, rt := range m.contract.RefreshTokens {
+ // Check for RTs associated with the account.
+ if rt.HomeAccountID == homeID && rt.Environment == env {
+ // Do RT's app ownership check as a precaution, in case family apps
+ // and 3rd-party apps share same token cache, although they should not.
+ if rt.ClientID == clientID || rt.FamilyID != "" {
+ delete(m.contract.RefreshTokens, key)
+ }
+ }
+ }
+}
+
+func (m *Manager) removeAccessTokens(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, at := range m.contract.AccessTokens {
+ // Remove AT's associated with the account
+ if at.HomeAccountID == homeID && at.Environment == env {
+ // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
+ // It means ATs for other apps will also be removed, it is OK because:
+ // non-family apps are not supposed to share token cache to begin with;
+ // Even if it happens, we keep other app's RT already, so SSO still works.
+ delete(m.contract.AccessTokens, key)
+ }
+ }
+}
+
+func (m *Manager) removeIDTokens(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, idt := range m.contract.IDTokens {
+ // Remove ID tokens associated with the account.
+ if idt.HomeAccountID == homeID && idt.Environment == env {
+ delete(m.contract.IDTokens, key)
+ }
+ }
+}
+
+func (m *Manager) removeAccounts(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, acc := range m.contract.Accounts {
+ // Remove the specified account.
+ if acc.HomeAccountID == homeID && acc.Environment == env {
+ delete(m.contract.Accounts, key)
+ }
+ }
+}
+
+// update updates the internal cache object. This is for use in tests, other uses are not
+// supported.
+func (m *Manager) update(cache *Contract) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract = cache
+}
+
+// Marshal implements cache.Marshaler.
+func (m *Manager) Marshal() ([]byte, error) {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+ return json.Marshal(m.contract)
+}
+
+// Unmarshal implements cache.Unmarshaler.
+func (m *Manager) Unmarshal(b []byte) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+
+ contract := NewContract()
+
+ err := json.Unmarshal(b, contract)
+ if err != nil {
+ return err
+ }
+
+ m.contract = contract
+
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
index 7b673e3fe..de1bf381f 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
@@ -31,4 +31,6 @@ type TokenProviderResult struct {
AccessToken string
// ExpiresInSeconds is the lifetime of the token in seconds
ExpiresInSeconds int
+ // RefreshInSeconds indicates the suggested time to refresh the token, if any
+ RefreshInSeconds int
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
index 2238521f5..2134e57c9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -18,10 +18,6 @@ import (
)
const addField = "AdditionalFields"
-const (
- marshalJSON = "MarshalJSON"
- unmarshalJSON = "UnmarshalJSON"
-)
var (
leftBrace = []byte("{")[0]
@@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool {
// hasMarshalJSON will determine if the value or a pointer to this value has
// the MarshalJSON method.
func hasMarshalJSON(v reflect.Value) bool {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
- }
-
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- } else {
- if !v.CanAddr() {
- return false
+ ok := false
+ if _, ok = v.Interface().(json.Marshaler); !ok {
+ var i any
+ if v.Kind() == reflect.Ptr {
+ i = v.Elem().Interface()
+ } else if v.CanAddr() {
+ i = v.Addr().Interface()
}
- v = v.Addr()
- }
-
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
+ _, ok = i.(json.Marshaler)
}
- return false
+ return ok
}
// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value.
// This will panic if the method is not defined.
func callMarshalJSON(v reflect.Value) ([]byte, error) {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
+ if marsh, ok := v.Interface().(json.Marshaler); ok {
return marsh.MarshalJSON()
}
if v.Kind() == reflect.Ptr {
- v = v.Elem()
+ if marsh, ok := v.Elem().Interface().(json.Marshaler); ok {
+ return marsh.MarshalJSON()
+ }
} else {
if v.CanAddr() {
- v = v.Addr()
+ if marsh, ok := v.Addr().Interface().(json.Marshaler); ok {
+ return marsh.MarshalJSON()
+ }
}
}
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
- return marsh.MarshalJSON()
- }
-
panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
}
@@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool {
v = v.Addr()
}
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Unmarshaler)
- return ok
- }
-
- return false
+ _, ok := v.Interface().(json.Unmarshaler)
+ return ok
}
// hasOmitEmpty indicates if the field has instructed us to not output
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
index 04236ff31..c6baf2094 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -7,6 +7,7 @@ package local
import (
"context"
"fmt"
+ "html"
"net"
"net/http"
"strconv"
@@ -141,11 +142,13 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
headerErr := q.Get("error")
if headerErr != "" {
- desc := q.Get("error_description")
+ desc := html.EscapeString(q.Get("error_description"))
+ escapedHeaderErr := html.EscapeString(headerErr)
// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
// change this to s.error() and make s.error() write the failPage instead of an error code.
- _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
- s.putResult(Result{Err: fmt.Errorf(desc)})
+ _, _ = w.Write([]byte(fmt.Sprintf(failPage, escapedHeaderErr, desc)))
+ s.putResult(Result{Err: fmt.Errorf("%s", desc)})
+
return
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
index ef8d908a4..738a29eb9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -10,6 +10,8 @@ import (
"io"
"time"
+ "github.com/google/uuid"
+
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
@@ -18,7 +20,6 @@ import (
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
- "github.com/google/uuid"
)
// ResolveEndpointer contains the methods for resolving authority endpoints.
@@ -110,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
Scopes: scopes,
TenantID: authParams.AuthorityInfo.Tenant,
}
- tr, err := cred.TokenProvider(ctx, params)
+ pr, err := cred.TokenProvider(ctx, params)
if err != nil {
if len(scopes) == 0 {
err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
@@ -118,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
}
return accesstokens.TokenResponse{}, err
}
- return accesstokens.TokenResponse{
- TokenType: authParams.AuthnScheme.AccessTokenType(),
- AccessToken: tr.AccessToken,
- ExpiresOn: internalTime.DurationTime{
- T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
- },
+ tr := accesstokens.TokenResponse{
+ TokenType: authParams.AuthnScheme.AccessTokenType(),
+ AccessToken: pr.AccessToken,
+ ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second),
GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
- }, nil
+ }
+ if pr.RefreshInSeconds > 0 {
+ tr.RefreshOn = internalTime.DurationTime{
+ T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second),
+ }
+ }
+ return tr, nil
}
if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
@@ -331,7 +336,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams
func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
if err != nil {
- return fmt.Errorf("unable to resolve an endpoint: %s", err)
+ return fmt.Errorf("unable to resolve an endpoint: %w", err)
}
authParams.Endpoints = endpoints
return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
index a7b7b0742..d738c7591 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -17,6 +17,7 @@ import (
/* #nosec */
"crypto/sha1"
+ "crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
@@ -68,7 +69,7 @@ type DeviceCodeResponse struct {
UserCode string `json:"user_code"`
DeviceCode string `json:"device_code"`
- VerificationURL string `json:"verification_url"`
+ VerificationURL string `json:"verification_uri"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
Message string `json:"message"`
@@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
}
return c.AssertionCallback(ctx, options)
}
-
- token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
+ claims := jwt.MapClaims{
"aud": authParams.Endpoints.TokenEndpoint,
"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
"iss": authParams.ClientID,
"jti": uuid.New().String(),
"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
"sub": authParams.ClientID,
- })
+ }
+
+ isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS ||
+ authParams.AuthorityInfo.AuthorityType == authority.DSTS
+
+ var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256
+ thumbprintKey := "x5t#S256"
+
+ if isADFSorDSTS {
+ signingMethod = jwt.SigningMethodRS256
+ thumbprintKey = "x5t"
+ }
+
+ token := jwt.NewWithClaims(signingMethod, claims)
token.Header = map[string]interface{}{
- "alg": "RS256",
- "typ": "JWT",
- "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
+ "alg": signingMethod.Alg(),
+ "typ": "JWT",
+ thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())),
}
if authParams.SendX5C {
@@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
assertion, err := token.SignedString(c.Key)
if err != nil {
- return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
+ return "", fmt.Errorf("unable to sign JWT token: %w", err)
}
+
return assertion, nil
}
// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
// https://tools.ietf.org/html/rfc7517#section-4.8
-func thumbprint(cert *x509.Certificate) []byte {
- /* #nosec */
- a := sha1.Sum(cert.Raw)
- return a[:]
+func thumbprint(cert *x509.Certificate, alg string) []byte {
+ switch alg {
+ case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this
+ hash := sha1.Sum(cert.Raw) /* #nosec */
+ return hash[:]
+ default:
+ hash := sha256.Sum256(cert.Raw)
+ return hash[:]
+ }
}
// Client represents the REST calls to get tokens from token generator backends.
@@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A
qv.Set(clientID, authParameters.ClientID)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromClientSecret(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
@@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth
qv.Set(clientInfo, clientInfoVal)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromAssertion(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
index 3107b45c1..32dde7b76 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"reflect"
+ "strconv"
"strings"
"time"
@@ -173,14 +174,75 @@ type TokenResponse struct {
FamilyID string `json:"foci"`
IDToken IDToken `json:"id_token"`
ClientInfo ClientInfo `json:"client_info"`
- ExpiresOn internalTime.DurationTime `json:"expires_in"`
+ RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"`
+ ExpiresOn time.Time `json:"-"`
ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
GrantedScopes Scopes `json:"scope"`
DeclinedScopes []string // This is derived
AdditionalFields map[string]interface{}
+ scopesComputed bool
+}
+
+func (tr *TokenResponse) UnmarshalJSON(data []byte) error {
+ type Alias TokenResponse
+ aux := &struct {
+ ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"`
+ ExpiresOn any `json:"expires_on,omitempty"`
+ *Alias
+ }{
+ Alias: (*Alias)(tr),
+ }
+
+ // Unmarshal the JSON data into the aux struct
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ // Function to parse different date formats
+ // This is a workaround for the issue described here:
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963
+ parseExpiresOn := func(expiresOn string) (time.Time, error) {
+ var formats = []string{
+ "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss
+ "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss
+ time.RFC3339Nano, // ISO 8601 (with nanosecond precision)
+ }
+
+ for _, format := range formats {
+ if t, err := time.Parse(format, expiresOn); err == nil {
+ return t, nil
+ }
+ }
+ return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn)
+ }
- scopesComputed bool
+ if expiresOnStr, ok := aux.ExpiresOn.(string); ok {
+ if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil {
+ tr.ExpiresOn = time.Unix(ts, 0)
+ return nil
+ }
+ if expiresOnStr != "" {
+ if t, err := parseExpiresOn(expiresOnStr); err != nil {
+ return err
+ } else {
+ tr.ExpiresOn = t
+ return nil
+ }
+ }
+ }
+
+ // Check if ExpiresOn is a number (Unix timestamp or ISO 8601)
+ if expiresOnNum, ok := aux.ExpiresOn.(float64); ok {
+ tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0)
+ return nil
+ }
+
+ if !aux.ExpiresIn.T.IsZero() {
+ tr.ExpiresOn = aux.ExpiresIn.T
+ return nil
+ }
+ return errors.New("expires_in and expires_on are both missing or invalid")
}
// ComputeScope computes the final scopes based on what was granted by the server and
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
index 9d60734f8..3f4037464 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -23,7 +23,7 @@ import (
const (
authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize"
- instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
+ aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration"
regionName = "REGION_NAME"
defaultAPIVersion = "2021-10-01"
@@ -46,14 +46,15 @@ type jsonCaller interface {
JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error
}
+// For backward compatibility, accept both old and new China endpoints for a transition period.
var aadTrustedHostList = map[string]bool{
- "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
- "login.chinacloudapi.cn": true, // Microsoft Azure China
- "login.microsoftonline.de": true, // Microsoft Azure Blackforest
- "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
- "login.microsoftonline.us": true, // Microsoft Azure US Government
- "login.microsoftonline.com": true, // Microsoft Azure Worldwide
- "login.cloudgovapi.us": true, // Microsoft Azure US Government
+ "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
+ "login.partner.microsoftonline.cn": true, // Microsoft Azure China (new)
+ "login.chinacloudapi.cn": true, // Microsoft Azure China (legacy, backward compatibility)
+ "login.microsoftonline.de": true, // Microsoft Azure Blackforest
+ "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
+ "login.microsoftonline.us": true, // Microsoft Azure US Government
+ "login.microsoftonline.com": true, // Microsoft Azure Worldwide
}
// TrustedHost checks if an AAD host is trusted/valid.
@@ -99,6 +100,41 @@ func (r *TenantDiscoveryResponse) Validate() error {
return nil
}
+// ValidateIssuerMatchesAuthority validates that the issuer in the TenantDiscoveryResponse matches the authority.
+// This is used to identity security or configuration issues in authorities and the OIDC endpoint
+func (r *TenantDiscoveryResponse) ValidateIssuerMatchesAuthority(authorityURI string, aliases map[string]bool) error {
+
+ if authorityURI == "" {
+ return errors.New("TenantDiscoveryResponse: empty authorityURI provided for validation")
+ }
+
+ // Parse the issuer URL
+ issuerURL, err := url.Parse(r.Issuer)
+ if err != nil {
+ return fmt.Errorf("TenantDiscoveryResponse: failed to parse issuer URL: %w", err)
+ }
+
+ // Even if it doesn't match the authority, issuers from known and trusted hosts are valid
+ if aliases != nil && aliases[issuerURL.Host] {
+ return nil
+ }
+
+ // Parse the authority URL for comparison
+ authorityURL, err := url.Parse(authorityURI)
+ if err != nil {
+ return fmt.Errorf("TenantDiscoveryResponse: failed to parse authority URL: %w", err)
+ }
+
+ // Check if the scheme and host match (paths can be ignored when validating the issuer)
+ if issuerURL.Scheme == authorityURL.Scheme && issuerURL.Host == authorityURL.Host {
+ return nil
+ }
+
+ // If we get here, validation failed
+ return fmt.Errorf("TenantDiscoveryResponse: issuer from OIDC discovery '%s' does not match authority '%s' or a known pattern",
+ r.Issuer, authorityURI)
+}
+
type InstanceDiscoveryMetadata struct {
PreferredNetwork string `json:"preferred_network"`
PreferredCache string `json:"preferred_cache"`
@@ -137,8 +173,12 @@ const (
const (
AAD = "MSSTS"
ADFS = "ADFS"
+ DSTS = "DSTS"
)
+// DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it.
+const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f"
+
// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
type AuthenticationScheme interface {
// Extra parameters that are added to the request to the /token endpoint.
@@ -236,23 +276,26 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint
// - the resulting authority URL is invalid
func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
- switch ID {
- case "", p.AuthorityInfo.Tenant:
- // keep the default tenant because the caller didn't override it
+ if ID == "" || ID == p.AuthorityInfo.Tenant {
return p, nil
- case "common", "consumers", "organizations":
- if p.AuthorityInfo.AuthorityType == AAD {
+ }
+
+ var authority string
+ switch p.AuthorityInfo.AuthorityType {
+ case AAD:
+ if ID == "common" || ID == "consumers" || ID == "organizations" {
return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID)
}
- // else we'll return a better error below
- }
- if p.AuthorityInfo.AuthorityType != AAD {
- return p, errors.New("the authority doesn't support tenants")
- }
- if p.AuthorityInfo.Tenant == "consumers" {
- return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
+ if p.AuthorityInfo.Tenant == "consumers" {
+ return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
+ }
+ authority = "https://" + path.Join(p.AuthorityInfo.Host, ID)
+ case ADFS:
+ return p, errors.New("ADFS authority doesn't support tenants")
+ case DSTS:
+ return p, errors.New("dSTS authority doesn't support tenants")
}
- authority := "https://" + path.Join(p.AuthorityInfo.Host, ID)
+
info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled)
if err == nil {
info.Region = p.AuthorityInfo.Region
@@ -344,44 +387,59 @@ type Info struct {
Host string
CanonicalAuthorityURI string
AuthorityType string
- UserRealmURIPrefix string
ValidateAuthority bool
Tenant string
Region string
InstanceDiscoveryDisabled bool
-}
-
-func firstPathSegment(u *url.URL) (string, error) {
- pathParts := strings.Split(u.EscapedPath(), "/")
- if len(pathParts) >= 2 {
- return pathParts[1], nil
- }
-
- return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
+ // InstanceDiscoveryMetadata stores the metadata from AAD instance discovery
+ InstanceDiscoveryMetadata []InstanceDiscoveryMetadata
}
// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
- u, err := url.Parse(strings.ToLower(authority))
- if err != nil || u.Scheme != "https" {
- return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
+
+ cannonicalAuthority := authority
+
+ // suffix authority with / if it doesn't have one
+ if !strings.HasSuffix(cannonicalAuthority, "/") {
+ cannonicalAuthority += "/"
}
- tenant, err := firstPathSegment(u)
+ u, err := url.Parse(strings.ToLower(cannonicalAuthority))
+
if err != nil {
- return Info{}, err
+ return Info{}, fmt.Errorf("couldn't parse authority url: %w", err)
}
+ if u.Scheme != "https" {
+ return Info{}, errors.New("authority url scheme must be https")
+ }
+
+ pathParts := strings.Split(u.EscapedPath(), "/")
+ if len(pathParts) < 3 {
+ return Info{}, errors.New(`authority must be an URL such as "https://login.microsoftonline.com/"`)
+ }
+
authorityType := AAD
- if tenant == "adfs" {
+ tenant := pathParts[1]
+ switch tenant {
+ case "adfs":
authorityType = ADFS
+ case "dstsv2":
+ if len(pathParts) != 4 {
+ return Info{}, fmt.Errorf("dSTS authority must be an https URL such as https:///dstsv2/%s", DSTSTenant)
+ }
+ if pathParts[2] != DSTSTenant {
+ return Info{}, fmt.Errorf("dSTS authority only accepts a single tenant %q", DSTSTenant)
+ }
+ authorityType = DSTS
+ tenant = DSTSTenant
}
// u.Host includes the port, if any, which is required for private cloud deployments
return Info{
Host: u.Host,
- CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant),
+ CanonicalAuthorityURI: cannonicalAuthority,
AuthorityType: authorityType,
- UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
ValidateAuthority: validateAuthority,
Tenant: tenant,
InstanceDiscoveryDisabled: instanceDiscoveryDisabled,
@@ -525,7 +583,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I
discoveryHost = authorityInfo.Host
}
- endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
+ endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost)
err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
}
return resp, err
@@ -543,17 +601,19 @@ func detectRegion(ctx context.Context) string {
client := http.Client{
Timeout: time.Duration(2 * time.Second),
}
- req, _ := http.NewRequest("GET", imdsEndpoint, nil)
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil)
req.Header.Set("Metadata", "true")
resp, err := client.Do(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
// If the request times out or there is an error, it is retried once
- if err != nil || resp.StatusCode != 200 {
+ if err != nil || resp.StatusCode != http.StatusOK {
resp, err = client.Do(req)
- if err != nil || resp.StatusCode != 200 {
+ if err != nil || resp.StatusCode != http.StatusOK {
return ""
}
}
- defer resp.Body.Close()
response, err := io.ReadAll(resp.Body)
if err != nil {
return ""
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
index 7d9ec7cd3..790680366 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -18,10 +18,11 @@ import (
"strings"
"time"
+ "github.com/google/uuid"
+
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
- "github.com/google/uuid"
)
// HTTPClient represents an HTTP client.
@@ -70,15 +71,13 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
unmarshal = customJSON.Unmarshal
}
- u, err := url.Parse(endpoint)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil)
if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ return fmt.Errorf("could not create request: %w", err)
}
- u.RawQuery = qv.Encode()
addStdHeaders(headers)
-
- req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+ req.Header = headers
if body != nil {
// Note: In case your wondering why we are not gzip encoding....
@@ -99,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))}
}
}
return nil
@@ -222,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values
}
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))}
}
}
return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
index 0ade41179..d220a9946 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -18,16 +18,15 @@ import (
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)
-// ADFS is an active directory federation service authority type.
-const ADFS = "ADFS"
-
type cacheEntry struct {
Endpoints authority.Endpoints
ValidForDomainsInList map[string]bool
+ // Aliases stores host aliases from instance discovery for quick lookup
+ Aliases map[string]bool
}
func createcacheEntry(endpoints authority.Endpoints) cacheEntry {
- return cacheEntry{endpoints, map[string]bool{}}
+ return cacheEntry{endpoints, map[string]bool{}, map[string]bool{}}
}
// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition.
@@ -51,7 +50,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
return endpoints, nil
}
- endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
+ endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo)
if err != nil {
return authority.Endpoints{}, err
}
@@ -74,16 +73,21 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints)
+ if err := resp.ValidateIssuerMatchesAuthority(authorityInfo.CanonicalAuthorityURI,
+ m.cache[authorityInfo.CanonicalAuthorityURI].Aliases); err != nil {
+ return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err)
+ }
+
return endpoints, nil
}
-// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false.
+// cachedEndpoints returns the cached endpoints if they exist. If not, we return false.
func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) {
m.mu.Lock()
defer m.mu.Unlock()
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
- if authorityInfo.AuthorityType == ADFS {
+ if authorityInfo.AuthorityType == authority.ADFS {
domain, err := adfsDomainFromUpn(userPrincipalName)
if err == nil {
if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
@@ -102,7 +106,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
updatedCacheEntry := createcacheEntry(endpoints)
- if authorityInfo.AuthorityType == ADFS {
+ if authorityInfo.AuthorityType == authority.ADFS {
// Since we're here, we've made a call to the backend. We want to ensure we're caching
// the latest values from the server.
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
@@ -116,25 +120,36 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
}
}
+ // Extract aliases from instance discovery metadata and add to cache
+ for _, metadata := range authorityInfo.InstanceDiscoveryMetadata {
+ for _, alias := range metadata.Aliases {
+ updatedCacheEntry.Aliases[alias] = true
+ }
+ }
+
m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
}
-func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) {
- if authorityInfo.Tenant == "adfs" {
+func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) {
+ if authorityInfo.AuthorityType == authority.ADFS {
return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
+ } else if authorityInfo.AuthorityType == authority.DSTS {
+ return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil
+
} else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) {
resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
if err != nil {
return "", err
}
+ authorityInfo.InstanceDiscoveryMetadata = resp.Metadata
return resp.TenantDiscoveryEndpoint, nil
} else if authorityInfo.Region != "" {
resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
if err != nil {
return "", err
}
+ authorityInfo.InstanceDiscoveryMetadata = resp.Metadata
return resp.TenantDiscoveryEndpoint, nil
-
}
return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
index eb16b405c..5e551abc8 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -5,4 +5,4 @@
package version
// Version is the version of this client package that is communicated to the server.
-const Version = "1.2.0"
+const Version = "1.4.2"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
new file mode 100644
index 000000000..d7cffc295
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
@@ -0,0 +1,28 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("secret", os.Getenv(msiSecretEnvVar))
+ q := req.URL.Query()
+ q.Set(apiVersionQueryParameterName, azureMLAPIVersion)
+ q.Set(resourceQueryParameterName, resource)
+ q.Set("clientid", os.Getenv("DEFAULT_IDENTITY_CLIENT_ID"))
+ if cid, ok := id.(UserAssignedClientID); ok {
+ q.Set("clientid", string(cid))
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
new file mode 100644
index 000000000..be9a0bca3
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiEndpointParsed, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", msiEndpoint, err)
+ }
+
+ data := url.Values{}
+ data.Set(resourceQueryParameterName, resource)
+ msiDataEncoded := data.Encode()
+ body := io.NopCloser(strings.NewReader(msiDataEncoded))
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, msiEndpointParsed.String(), body)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+
+ req.Header.Set(metaHTTPHeaderName, "true")
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
new file mode 100644
index 000000000..ca3de4325
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
@@ -0,0 +1,717 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package managedidentity provides a client for retrieval of Managed Identity applications.
+The Managed Identity Client is used to acquire a token for managed identity assigned to
+an azure resource such as Azure function, app service, virtual machine, etc. to acquire a token
+without using credentials.
+*/
+package managedidentity
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
+const (
+ // DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set.
+ DefaultToIMDS Source = "DefaultToIMDS"
+ AzureArc Source = "AzureArc"
+ ServiceFabric Source = "ServiceFabric"
+ CloudShell Source = "CloudShell"
+ AzureML Source = "AzureML"
+ AppService Source = "AppService"
+
+ // General request query parameter names
+ metaHTTPHeaderName = "Metadata"
+ apiVersionQueryParameterName = "api-version"
+ resourceQueryParameterName = "resource"
+ wwwAuthenticateHeaderName = "www-authenticate"
+
+ // UAMI query parameter name
+ miQueryParameterClientId = "client_id"
+ miQueryParameterObjectId = "object_id"
+ miQueryParameterPrincipalId = "principal_id"
+ miQueryParameterResourceIdIMDS = "msi_res_id"
+ miQueryParameterResourceId = "mi_res_id"
+
+ // IMDS
+ imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+ imdsAPIVersion = "2018-02-01"
+ systemAssignedManagedIdentity = "system_assigned_managed_identity"
+
+ // Azure Arc
+ azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token"
+ azureArcAPIVersion = "2020-06-01"
+ azureArcFileExtension = ".key"
+ azureArcMaxFileSizeBytes int64 = 4096
+ linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101
+ linuxHimdsPath = "/opt/azcmagent/bin/himds"
+ azureConnectedMachine = "AzureConnectedMachineAgent"
+ himdsExecutableName = "himds.exe"
+ tokenName = "Tokens"
+
+ // App Service
+ appServiceAPIVersion = "2019-08-01"
+
+ // AzureML
+ azureMLAPIVersion = "2017-09-01"
+ // Service Fabric
+ serviceFabricAPIVersion = "2019-07-01-preview"
+
+ // Environment Variables
+ identityEndpointEnvVar = "IDENTITY_ENDPOINT"
+ identityHeaderEnvVar = "IDENTITY_HEADER"
+ azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST"
+ imdsEndVar = "IMDS_ENDPOINT"
+ msiEndpointEnvVar = "MSI_ENDPOINT"
+ msiSecretEnvVar = "MSI_SECRET"
+ identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT"
+
+ defaultRetryCount = 3
+)
+
+var retryCodesForIMDS = []int{
+ http.StatusNotFound, // 404
+ http.StatusGone, // 410
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusNotImplemented, // 501
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ http.StatusHTTPVersionNotSupported, // 505
+ http.StatusVariantAlsoNegotiates, // 506
+ http.StatusInsufficientStorage, // 507
+ http.StatusLoopDetected, // 508
+ http.StatusNotExtended, // 510
+ http.StatusNetworkAuthenticationRequired, // 511
+}
+
+var retryStatusCodes = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+}
+
+var getAzureArcPlatformPath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName)
+ case "linux":
+ return linuxTokenPath
+ default:
+ return ""
+ }
+}
+
+var getAzureArcHimdsFilePath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName)
+ case "linux":
+ return linuxHimdsPath
+ default:
+ return ""
+ }
+}
+
+type Source string
+
+type ID interface {
+ value() string
+}
+
+type systemAssignedValue string // its private for a reason to make the input consistent.
+type UserAssignedClientID string
+type UserAssignedObjectID string
+type UserAssignedResourceID string
+
+func (s systemAssignedValue) value() string { return string(s) }
+func (c UserAssignedClientID) value() string { return string(c) }
+func (o UserAssignedObjectID) value() string { return string(o) }
+func (r UserAssignedResourceID) value() string { return string(r) }
+func SystemAssigned() ID {
+ return systemAssignedValue(systemAssignedManagedIdentity)
+}
+
+// cache never uses the client because instance discovery is always disabled.
+var cacheManager *storage.Manager = storage.New(nil)
+
+type Client struct {
+ httpClient ops.HTTPClient
+ miType ID
+ source Source
+ authParams authority.AuthParams
+ retryPolicyEnabled bool
+ canRefresh *atomic.Value
+}
+
+type AcquireTokenOptions struct {
+ claims string
+}
+
+type ClientOption func(*Client)
+
+type AcquireTokenOption func(o *AcquireTokenOptions)
+
+// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies.
+// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
+func WithClaims(claims string) AcquireTokenOption {
+ return func(o *AcquireTokenOptions) {
+ o.claims = claims
+ }
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) ClientOption {
+ return func(c *Client) {
+ c.httpClient = httpClient
+ }
+}
+
+func WithRetryPolicyDisabled() ClientOption {
+ return func(c *Client) {
+ c.retryPolicyEnabled = false
+ }
+}
+
+// Client to be used to acquire tokens for managed identity.
+// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID]
+//
+// Options: [WithHTTPClient]
+func New(id ID, options ...ClientOption) (Client, error) {
+ source, err := GetSource()
+ if err != nil {
+ return Client{}, err
+ }
+
+ // Check for user-assigned restrictions based on the source
+ switch source {
+ case AzureArc:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities")
+ }
+ case AzureML:
+ switch id.(type) {
+ case UserAssignedObjectID, UserAssignedResourceID:
+ return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only")
+ }
+ case CloudShell:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities")
+ }
+ case ServiceFabric:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
+ }
+ }
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedResourceID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedObjectID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case systemAssignedValue:
+ default:
+ return Client{}, fmt.Errorf("unsupported type %T", id)
+ }
+ zero := atomic.Value{}
+ zero.Store(false)
+ client := Client{
+ miType: id,
+ httpClient: shared.DefaultClient,
+ retryPolicyEnabled: true,
+ source: source,
+ canRefresh: &zero,
+ }
+ for _, option := range options {
+ option(&client)
+ }
+ fakeAuthInfo, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true)
+ if err != nil {
+ return Client{}, err
+ }
+ client.authParams = authority.NewAuthParams(client.miType.value(), fakeAuthInfo)
+ return client, nil
+}
+
+// GetSource detects and returns the managed identity source available on the environment.
+func GetSource() (Source, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ identityHeader := os.Getenv(identityHeaderEnvVar)
+ identityServerThumbprint := os.Getenv(identityServerThumbprintEnvVar)
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiSecret := os.Getenv(msiSecretEnvVar)
+ imdsEndpoint := os.Getenv(imdsEndVar)
+
+ if identityEndpoint != "" && identityHeader != "" {
+ if identityServerThumbprint != "" {
+ return ServiceFabric, nil
+ }
+ return AppService, nil
+ } else if msiEndpoint != "" {
+ if msiSecret != "" {
+ return AzureML, nil
+ } else {
+ return CloudShell, nil
+ }
+ } else if isAzureArcEnvironment(identityEndpoint, imdsEndpoint) {
+ return AzureArc, nil
+ }
+
+ return DefaultToIMDS, nil
+}
+
+// This function wraps time.Now() and is used for refreshing the application
+// was created to test the function against refreshin
+var now = time.Now
+
+// Acquires tokens from the configured managed identity on an azure resource.
+//
+// Resource: scopes application is requesting access to
+// Options: [WithClaims]
+func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) {
+ resource = strings.TrimSuffix(resource, "/.default")
+ o := AcquireTokenOptions{}
+ for _, option := range options {
+ option(&o)
+ }
+ c.authParams.Scopes = []string{resource}
+
+ // ignore cached access tokens when given claims
+ if o.claims == "" {
+ stResp, err := cacheManager.Read(ctx, c.authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar, err := base.AuthResultFromStorage(stResp)
+ if err == nil {
+ if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) {
+ defer c.canRefresh.Store(false)
+ if tr, er := c.getToken(ctx, resource); er == nil {
+ return tr, nil
+ }
+ }
+ ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+ }
+ }
+ return c.getToken(ctx, resource)
+}
+
+func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) {
+ switch c.source {
+ case AzureArc:
+ return c.acquireTokenForAzureArc(ctx, resource)
+ case AzureML:
+ return c.acquireTokenForAzureML(ctx, resource)
+ case CloudShell:
+ return c.acquireTokenForCloudShell(ctx, resource)
+ case DefaultToIMDS:
+ return c.acquireTokenForIMDS(ctx, resource)
+ case AppService:
+ return c.acquireTokenForAppService(ctx, resource)
+ case ServiceFabric:
+ return c.acquireTokenForServiceFabric(ctx, resource)
+ default:
+ return AuthResult{}, fmt.Errorf("unsupported source %q", c.source)
+ }
+}
+
+func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAppServiceAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createIMDSAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createCloudShellAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureMLAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createServiceFabricAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureArcAuthRequest(ctx, resource, "")
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ response, err := c.httpClient.Do(req)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ defer response.Body.Close()
+
+ if response.StatusCode != http.StatusUnauthorized {
+ return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode)
+ }
+
+ secret, err := c.getAzureArcSecretKey(response, runtime.GOOS)
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret))
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ tokenResponse, err := c.getTokenForRequest(secondRequest, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
+ if cacheManager == nil {
+ return AuthResult{}, errors.New("cache instance is nil")
+ }
+ account, err := cacheManager.Write(authParams, token)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ // if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away
+ if token.RefreshOn.T.IsZero() {
+ if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour {
+ token.RefreshOn.T = time.Now().Add(lifetime / 2)
+ }
+ }
+ ar, err := base.NewAuthResult(token, account)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+}
+
+// contains checks if the element is present in the list.
+func contains[T comparable](list []T, element T) bool {
+ for _, v := range list {
+ if v == element {
+ return true
+ }
+ }
+ return false
+}
+
+// retry performs an HTTP request with retries based on the provided options.
+func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute)
+ defer tryCancel()
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+ cloneReq := req.Clone(tryCtx)
+ resp, err = c.httpClient.Do(cloneReq)
+ retrylist := retryStatusCodes
+ if c.source == DefaultToIMDS {
+ retrylist = retryCodesForIMDS
+ }
+ if err == nil && !contains(retrylist, resp.StatusCode) {
+ return resp, nil
+ }
+ select {
+ case <-time.After(time.Second):
+ case <-req.Context().Done():
+ err = req.Context().Err()
+ return resp, err
+ }
+ }
+ return resp, err
+}
+
+func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) {
+ r := accesstokens.TokenResponse{}
+ var resp *http.Response
+ var err error
+
+ if c.retryPolicyEnabled {
+ resp, err = c.retry(defaultRetryCount, req)
+ } else {
+ resp, err = c.httpClient.Do(req)
+ }
+ if err != nil {
+ return r, err
+ }
+ responseBytes, err := io.ReadAll(resp.Body)
+ defer resp.Body.Close()
+ if err != nil {
+ return r, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusAccepted:
+ default:
+ sd := strings.TrimSpace(string(responseBytes))
+ if sd != "" {
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s",
+ req.URL.String(),
+ req.Method,
+ resp.StatusCode,
+ sd),
+ }
+ }
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode),
+ }
+ }
+
+ err = json.Unmarshal(responseBytes, &r)
+ if err != nil {
+ return r, errors.InvalidJsonErr{
+ Err: fmt.Errorf("error parsing the json error: %s", err),
+ }
+ }
+ r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource)
+
+ return r, err
+}
+
+func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", appServiceAPIVersion)
+ q.Set("resource", resource)
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ q.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ q.Set(miQueryParameterResourceId, string(t))
+ case UserAssignedObjectID:
+ q.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue:
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
+
+func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ msiEndpoint, err := url.Parse(imdsDefaultEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err)
+ }
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, imdsAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ msiParameters.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ msiParameters.Set(miQueryParameterResourceIdIMDS, string(t))
+ case UserAssignedObjectID:
+ msiParameters.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue: // not adding anything
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+ return req, nil
+}
+
+func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ if identityEndpoint == "" {
+ identityEndpoint = azureArcEndpoint
+ }
+ msiEndpoint, parseErr := url.Parse(identityEndpoint)
+
+ if parseErr != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", identityEndpoint, parseErr)
+ }
+
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, azureArcAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+
+ if key != "" {
+ req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
+ }
+
+ return req, nil
+}
+
+func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool {
+ if identityEndpoint != "" && imdsEndpoint != "" {
+ return true
+ }
+ himdsFilePath := getAzureArcHimdsFilePath(runtime.GOOS)
+ if himdsFilePath != "" {
+ if _, err := os.Stat(himdsFilePath); err == nil {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) {
+ wwwAuthenticateHeader := response.Header.Get(wwwAuthenticateHeaderName)
+
+ if len(wwwAuthenticateHeader) == 0 {
+ return "", errors.New("response has no www-authenticate header")
+ }
+
+ // check if the platform is supported
+ expectedSecretFilePath := getAzureArcPlatformPath(platform)
+ if expectedSecretFilePath == "" {
+ return "", errors.New("platform not supported, expected linux or windows")
+ }
+
+ parts := strings.Split(wwwAuthenticateHeader, "Basic realm=")
+ if len(parts) < 2 {
+ return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", wwwAuthenticateHeader)
+ }
+
+ secretFilePath := parts
+
+ // check that the file in the file path is a .key file
+ fileName := filepath.Base(secretFilePath[1])
+ if !strings.HasSuffix(fileName, azureArcFileExtension) {
+ return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName))
+ }
+
+ // check that file path from header matches the expected file path for the platform
+ if expectedSecretFilePath != filepath.Dir(secretFilePath[1]) {
+ return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedSecretFilePath, filepath.Dir(secretFilePath[1]))
+ }
+
+ fileInfo, err := os.Stat(secretFilePath[1])
+ if err != nil {
+ return "", fmt.Errorf("failed to get metadata for %s due to error: %s", secretFilePath[1], err)
+ }
+
+ // Throw an error if the secret file's size is greater than 4096 bytes
+ if s := fileInfo.Size(); s > azureArcMaxFileSizeBytes {
+ return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, s)
+ }
+
+ // Attempt to read the contents of the secret file
+ secret, err := os.ReadFile(secretFilePath[1])
+ if err != nil {
+ return "", fmt.Errorf("failed to read %q due to error: %s", secretFilePath[1], err)
+ }
+
+ return string(secret), nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
new file mode 100644
index 000000000..535065e9d
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
@@ -0,0 +1,25 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", serviceFabricAPIVersion)
+ q.Set("resource", resource)
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
index 392e5e43f..7beed2617 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
// clientOptions configures the Client's behavior.
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type DeviceCodeResult = accesstokens.DeviceCodeResult
@@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error
if err != nil {
return AuthResult{}, err
}
- return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+ return d.client.base.AuthResultFromToken(ctx, d.authParams, token)
}
// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
@@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type interactiveAuthResult struct {
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
new file mode 100644
index 000000000..9515ee520
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
@@ -0,0 +1,3 @@
+# GCP Resource detection library
+
+This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
new file mode 100644
index 000000000..9ce7d96fe
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import "context"
+
+const (
+ // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
+ // for the environment variables available in GAE environments.
+ gaeServiceEnv = "GAE_SERVICE"
+ gaeVersionEnv = "GAE_VERSION"
+ gaeInstanceEnv = "GAE_INSTANCE"
+ gaeEnv = "GAE_ENV"
+ gaeStandard = "standard"
+)
+
+func (d *Detector) onAppEngineStandard() bool {
+ // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables.
+ env, found := d.os.LookupEnv(gaeEnv)
+ return found && env == gaeStandard
+}
+
+func (d *Detector) onAppEngine() bool {
+ _, found := d.os.LookupEnv(gaeServiceEnv)
+ return found
+}
+
+// AppEngineServiceName returns the service name of the app engine service.
+func (d *Detector) AppEngineServiceName() (string, error) {
+ if name, found := d.os.LookupEnv(gaeServiceEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceVersion returns the service version of the app engine service.
+func (d *Detector) AppEngineServiceVersion() (string, error) {
+ if version, found := d.os.LookupEnv(gaeVersionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceInstance returns the service instance of the app engine service.
+func (d *Detector) AppEngineServiceInstance() (string, error) {
+ if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) {
+ // The GCE metadata server is available on App Engine Flex.
+ return d.GCEAvailabilityZoneAndRegion()
+}
+
+// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
+func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
+ return d.metadata.ZoneWithContext(context.TODO())
+}
+
+// AppEngineStandardCloudRegion returns the region the app engine service is running in.
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) {
+ return d.FaaSCloudRegion()
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
new file mode 100644
index 000000000..d3992a4f7
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ bmsProjectIDEnv = "BMS_PROJECT_ID"
+ bmsRegionEnv = "BMS_REGION"
+ bmsInstanceIDEnv = "BMS_INSTANCE_ID"
+)
+
+// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
+// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
+// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
+func (d *Detector) onBareMetalSolution() bool {
+ projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
+ region, regionExists := d.os.LookupEnv(bmsRegionEnv)
+ instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
+ return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
+}
+
+// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
+func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
+ if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
+ if region, found := d.os.LookupEnv(bmsRegionEnv); found {
+ return region, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
+func (d *Detector) BareMetalSolutionProjectID() (string, error) {
+ if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
+ return project, nil
+ }
+ return "", errEnvVarNotFound
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
new file mode 100644
index 000000000..4eac3c74b
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -0,0 +1,101 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+var errEnvVarNotFound = errors.New("environment variable not found")
+
+// NewDetector returns a *Detector which can get detect the platform,
+// and fetch attributes of the platform on which it is running.
+func NewDetector() *Detector {
+ return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}}
+}
+
+type Platform int64
+
+const (
+ UnknownPlatform Platform = iota
+ GKE
+ GCE
+ CloudRun
+ CloudRunJob
+ CloudFunctions
+ AppEngineStandard
+ AppEngineFlex
+ BareMetalSolution
+)
+
+// CloudPlatform returns the platform on which this program is running.
+func (d *Detector) CloudPlatform() Platform {
+ switch {
+ case d.onBareMetalSolution():
+ return BareMetalSolution
+ case d.onGKE():
+ return GKE
+ case d.onCloudFunctions():
+ return CloudFunctions
+ case d.onCloudRun():
+ return CloudRun
+ case d.onCloudRunJob():
+ return CloudRunJob
+ case d.onAppEngineStandard():
+ return AppEngineStandard
+ case d.onAppEngine():
+ return AppEngineFlex
+ case d.onGCE():
+ return GCE
+ }
+ return UnknownPlatform
+}
+
+// ProjectID returns the ID of the project in which this program is running.
+func (d *Detector) ProjectID() (string, error) {
+ // N.B. d.metadata.ProjectIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
+ s, err := d.metadata.GetWithContext(context.TODO(), "project/project-id")
+ return strings.TrimSpace(s), err
+}
+
+// instanceID returns the ID of the project in which this program is running.
+func (d *Detector) instanceID() (string, error) {
+ // N.B. d.metadata.InstanceIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
+ s, err := d.metadata.GetWithContext(context.TODO(), "instance/id")
+ return strings.TrimSpace(s), err
+}
+
+// Detector collects resource information for all GCP platforms.
+type Detector struct {
+ metadata *metadata.Client
+ os osProvider
+}
+
+// osProvider contains the subset of the os package functions used by.
+type osProvider interface {
+ LookupEnv(string) (string, bool)
+}
+
+// realOSProvider uses the os package to lookup env vars.
+type realOSProvider struct{}
+
+func (realOSProvider) LookupEnv(env string) (string, bool) {
+ return os.LookupEnv(env)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
new file mode 100644
index 000000000..f137b1fae
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
@@ -0,0 +1,106 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "strings"
+)
+
+const (
+ // Cloud Functions env vars:
+ // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes
+ //
+ // Cloud Run env vars:
+ // https://cloud.google.com/run/docs/container-contract#services-env-vars
+ //
+ // Cloud Run jobs env vars:
+ // https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ cloudFunctionsTargetEnv = "FUNCTION_TARGET"
+ cloudRunConfigurationEnv = "K_CONFIGURATION"
+ cloudRunJobsEnv = "CLOUD_RUN_JOB"
+ faasServiceEnv = "K_SERVICE"
+ faasRevisionEnv = "K_REVISION"
+ cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION"
+ cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX"
+ regionMetadataAttr = "instance/region"
+)
+
+func (d *Detector) onCloudFunctions() bool {
+ _, found := d.os.LookupEnv(cloudFunctionsTargetEnv)
+ return found
+}
+
+func (d *Detector) onCloudRun() bool {
+ _, found := d.os.LookupEnv(cloudRunConfigurationEnv)
+ return found
+}
+
+func (d *Detector) onCloudRunJob() bool {
+ _, found := d.os.LookupEnv(cloudRunJobsEnv)
+ return found
+}
+
+// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service.
+func (d *Detector) FaaSName() (string, error) {
+ if name, found := d.os.LookupEnv(faasServiceEnv); found {
+ return name, nil
+ }
+ if name, found := d.os.LookupEnv(cloudRunJobsEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service.
+func (d *Detector) FaaSVersion() (string, error) {
+ if version, found := d.os.LookupEnv(faasRevisionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobExecution returns the execution id of the Cloud Run jobs.
+func (d *Detector) CloudRunJobExecution() (string, error) {
+ if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found {
+ return eid, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs.
+func (d *Detector) CloudRunJobTaskIndex() (string, error) {
+ if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found {
+ return tidx, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSID returns the instance id of the Cloud Run or Cloud Function.
+func (d *Detector) FaaSID() (string, error) {
+ return d.instanceID()
+}
+
+// FaaSCloudRegion detects region from the metadata server.
+// It is in the format /projects//regions/.
+//
+// https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+func (d *Detector) FaaSCloudRegion() (string, error) {
+ region, err := d.metadata.GetWithContext(context.TODO(), regionMetadataAttr)
+ if err != nil {
+ return "", err
+ }
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
new file mode 100644
index 000000000..794cfdf03
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
@@ -0,0 +1,117 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+// See the available GCE instance metadata:
+// https://cloud.google.com/compute/docs/metadata/predefined-metadata-keys#instance-metadata
+const machineTypeMetadataAttr = "instance/machine-type"
+
+// https://cloud.google.com/compute/docs/instance-groups/getting-info-about-migs#checking_if_a_vm_instance_is_part_of_a_mig
+const createdByInstanceAttr = "created-by"
+
+func (d *Detector) onGCE() bool {
+ _, err := d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
+ return err == nil
+}
+
+// GCEHostType returns the machine type of the instance on which this program is running.
+func (d *Detector) GCEHostType() (string, error) {
+ return d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
+}
+
+// GCEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GCEHostID() (string, error) {
+ return d.instanceID()
+}
+
+// GCEHostName returns the instance name of the instance on which this program is running.
+// Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which
+// value is returned.
+func (d *Detector) GCEHostName() (string, error) {
+ return d.metadata.InstanceNameWithContext(context.TODO())
+}
+
+// GCEInstanceName returns the instance name of the instance on which this program is running.
+// This is the value visible in the Cloud Console UI, and the prefix for the default hostname
+// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func (d *Detector) GCEInstanceName() (string, error) {
+ return d.metadata.InstanceNameWithContext(context.TODO())
+}
+
+// GCEInstanceHostname returns the full value of the default or custom hostname of the instance
+// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm.
+func (d *Detector) GCEInstanceHostname() (string, error) {
+ return d.metadata.HostnameWithContext(context.TODO())
+}
+
+// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
+ zone, err := d.metadata.ZoneWithContext(context.TODO())
+ if err != nil {
+ return "", "", err
+ }
+ if zone == "" {
+ return "", "", fmt.Errorf("no zone detected from GCE metadata server")
+ }
+ splitZone := strings.SplitN(zone, "-", 3)
+ if len(splitZone) != 3 {
+ return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone)
+ }
+ return zone, strings.Join(splitZone[0:2], "-"), nil
+}
+
+type ManagedInstanceGroup struct {
+ Name string
+ Location string
+ Type LocationType
+}
+
+var createdByMIGRE = regexp.MustCompile(`^projects/[^/]+/(zones|regions)/([^/]+)/instanceGroupManagers/([^/]+)$`)
+
+func (d *Detector) GCEManagedInstanceGroup() (ManagedInstanceGroup, error) {
+ createdBy, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), createdByInstanceAttr)
+ if _, ok := err.(metadata.NotDefinedError); ok {
+ return ManagedInstanceGroup{}, nil
+ } else if err != nil {
+ return ManagedInstanceGroup{}, err
+ }
+ matches := createdByMIGRE.FindStringSubmatch(createdBy)
+ if matches == nil {
+ // The "created-by" key exists, but it doesn't describe a MIG.
+ // Something else must have created this VM.
+ return ManagedInstanceGroup{}, nil
+ }
+
+ mig := ManagedInstanceGroup{
+ Name: matches[3],
+ Location: matches[2],
+ }
+ switch matches[1] {
+ case "zones":
+ mig.Type = Zone
+ case "regions":
+ mig.Type = Region
+ }
+ return mig, nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
new file mode 100644
index 000000000..734d44cc0
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+const (
+ // If the kubernetes.default.svc service exists in the cluster,
+ // then the KUBERNETES_SERVICE_HOST env var will be populated.
+ // Use this as an indication that we are running on kubernetes.
+ k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST"
+ // See the available GKE metadata:
+ // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata
+ clusterNameMetadataAttr = "cluster-name"
+ clusterLocationMetadataAttr = "cluster-location"
+)
+
+func (d *Detector) onGKE() bool {
+ // Check if we are on k8s first
+ _, found := d.os.LookupEnv(k8sServiceHostEnv)
+ if !found {
+ return false
+ }
+ // If we are on k8s, make sure that we are actually on GKE, and not a
+ // different managed k8s platform.
+ _, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
+ return err == nil
+}
+
+// GKEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GKEHostID() (string, error) {
+ return d.GCEHostID()
+}
+
+// GKEClusterName returns the name if the GKE cluster in which this program is running.
+func (d *Detector) GKEClusterName() (string, error) {
+ return d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterNameMetadataAttr)
+}
+
+type LocationType int64
+
+const (
+ UndefinedLocation LocationType = iota
+ Zone
+ Region
+)
+
+// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional.
+func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) {
+ clusterLocation, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
+ if err != nil {
+ return "", UndefinedLocation, err
+ }
+ switch strings.Count(clusterLocation, "-") {
+ case 1:
+ return clusterLocation, Region, nil
+ case 2:
+ return clusterLocation, Zone, nil
+ default:
+ return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation)
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
new file mode 100644
index 000000000..ea391705f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
@@ -0,0 +1,44 @@
+# OpenTelemetry Google Cloud Monitoring Exporter
+
+[](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric)
+[![Apache License][license-image]][license-url]
+
+OpenTelemetry Google Cloud Monitoring Exporter allows the user to send collected metrics to Google Cloud.
+
+To get started with instrumentation in Google Cloud, see [Generate traces and metrics with
+Go](https://cloud.google.com/stackdriver/docs/instrumentation/setup/go).
+
+To learn more about instrumentation and observability, including opinionated recommendations
+for Google Cloud Observability, visit [Instrumentation and
+observability](https://cloud.google.com/stackdriver/docs/instrumentation/overview).
+
+[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more.
+
+## Setup
+
+Google Cloud Monitoring is a managed service provided by Google Cloud Platform. Google Cloud Monitoring requires to set up "Workspace" in advance. The guide to create a new Workspace is available on [the official document](https://cloud.google.com/monitoring/workspaces/create).
+
+## Authentication
+
+The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so the service account is automatically detected by default, but also the custom credential file (so called `service_account_key.json`) can be detected with specific conditions. Quoting from the document of `google.FindDefaultCredentials`:
+
+* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
+* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`.
+
+When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g. `GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.:
+
+```golang
+projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
+opts := []mexporter.Option{
+ mexporter.WithProjectID(projectID),
+}
+```
+
+## Useful links
+
+* For more information on OpenTelemetry, visit: https://opentelemetry.io/
+* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go
+* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring
+
+[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE
+[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
new file mode 100644
index 000000000..90dfcb344
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
@@ -0,0 +1,49 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "golang.org/x/oauth2/google"
+)
+
+// New creates a new Exporter thats implements metric.Exporter.
+func New(opts ...Option) (sdkmetric.Exporter, error) {
+ o := options{
+ context: context.Background(),
+ resourceAttributeFilter: DefaultResourceAttributesFilter,
+ }
+ for _, opt := range opts {
+ opt(&o)
+ }
+
+ if o.projectID == "" {
+ creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("google cloud monitoring: no project found with application default credentials")
+ }
+ o.projectID = creds.ProjectID
+ }
+ return newMetricExporter(&o)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
new file mode 100644
index 000000000..57329a4bd
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
@@ -0,0 +1,97 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// TODO: remove this file when the constants are ready in the Go SDK
+
+// Mappings for the well-known OpenTelemetry resource label keys
+// to applicable Monitored Resource label keys.
+// A uniquely identifying name for the Kubernetes cluster. Kubernetes
+// does not have cluster names as an internal concept so this may be
+// set to any meaningful value within the environment. For example,
+// GKE clusters have a name which can be used for this label.
+const (
+ // Deprecated: use semconv.CloudProviderKey instead.
+ CloudKeyProvider = "cloud.provider"
+ // Deprecated: use semconv.CloudAccountIDKey instead.
+ CloudKeyAccountID = "cloud.account.id"
+ // Deprecated: use semconv.CloudRegionKey instead.
+ CloudKeyRegion = "cloud.region"
+ // Deprecated: use semconv.CloudAvailabilityZoneKey instead.
+ CloudKeyZone = "cloud.availability_zone"
+
+ // Deprecated: use semconv.ServiceNamespaceKey instead.
+ ServiceKeyNamespace = "service.namespace"
+ // Deprecated: use semconv.ServiceInstanceIDKey instead.
+ ServiceKeyInstanceID = "service.instance.id"
+ // Deprecated: use semconv.ServiceNameKey instead.
+ ServiceKeyName = "service.name"
+
+ // Deprecated: HostType is not needed.
+ HostType = "host"
+ // A uniquely identifying name for the host.
+ // Deprecated: use semconv.HostNameKey instead.
+ HostKeyName = "host.name"
+ // A hostname as returned by the 'hostname' command on host machine.
+ // Deprecated: HostKeyHostName is not needed.
+ HostKeyHostName = "host.hostname"
+ // Deprecated: use semconv.HostIDKey instead.
+ HostKeyID = "host.id"
+ // Deprecated: use semconv.HostTypeKey instead.
+ HostKeyType = "host.type"
+
+ // A uniquely identifying name for the Container.
+ // Deprecated: use semconv.ContainerNameKey instead.
+ ContainerKeyName = "container.name"
+ // Deprecated: use semconv.ContainerImageNameKey instead.
+ ContainerKeyImageName = "container.image.name"
+ // Deprecated: use semconv.ContainerImageTagKey instead.
+ ContainerKeyImageTag = "container.image.tag"
+
+ // Cloud Providers
+ // Deprecated: use semconv.CloudProviderAWS instead.
+ CloudProviderAWS = "aws"
+ // Deprecated: use semconv.CloudProviderGCP instead.
+ CloudProviderGCP = "gcp"
+ // Deprecated: use semconv.CloudProviderAzure instead.
+ CloudProviderAZURE = "azure"
+
+ // Deprecated: Use "k8s" instead. This should not be needed.
+ K8S = "k8s"
+ // Deprecated: use semconv.K8SClusterNameKey instead.
+ K8SKeyClusterName = "k8s.cluster.name"
+ // Deprecated: use semconv.K8SNamespaceNameKey instead.
+ K8SKeyNamespaceName = "k8s.namespace.name"
+ // Deprecated: use semconv.K8SPodNameKey instead.
+ K8SKeyPodName = "k8s.pod.name"
+ // Deprecated: use semconv.K8SDeploymentNameKey instead.
+ K8SKeyDeploymentName = "k8s.deployment.name"
+
+ // Monitored Resources types
+ // Deprecated: Use "k8s_container" instead.
+ K8SContainer = "k8s_container"
+ // Deprecated: Use "k8s_node" instead.
+ K8SNode = "k8s_node"
+ // Deprecated: Use "k8s_pod" instead.
+ K8SPod = "k8s_pod"
+ // Deprecated: Use "k8s_cluster" instead.
+ K8SCluster = "k8s_cluster"
+ // Deprecated: Use "gce_instance" instead.
+ GCEInstance = "gce_instance"
+ // Deprecated: Use "aws_ec2_instance" instead.
+ AWSEC2Instance = "aws_ec2_instance"
+ // Deprecated: Use "generic_task" instead.
+ GenericTask = "generic_task"
+)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
new file mode 100644
index 000000000..974c0af95
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
@@ -0,0 +1,32 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+type errUnexpectedAggregationKind struct {
+ kind string
+}
+
+func (e errUnexpectedAggregationKind) Error() string {
+ return fmt.Sprintf("the metric kind is unexpected: %v", e.kind)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
new file mode 100644
index 000000000..b0ab713c6
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
@@ -0,0 +1,890 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/option"
+ "google.golang.org/genproto/googleapis/api/distribution"
+ "google.golang.org/genproto/googleapis/api/label"
+ googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping"
+)
+
+const (
+ // The number of timeserieses to send to GCM in a single request. This
+ // is a hard limit in the GCM API, so we never want to exceed 200.
+ sendBatchSize = 200
+
+ cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s"
+ platformMappingMonitoredResourceKey = "gcp.resource_type"
+)
+
+// key is used to judge the uniqueness of the record descriptor.
+type key struct {
+ name string
+ libraryname string
+}
+
+func keyOf(metrics metricdata.Metrics, library instrumentation.Scope) key {
+ return key{
+ name: metrics.Name,
+ libraryname: library.Name,
+ }
+}
+
+// metricExporter is the implementation of OpenTelemetry metric exporter for
+// Google Cloud Monitoring.
+type metricExporter struct {
+ o *options
+ shutdown chan struct{}
+ // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD.
+ mdCache map[key]*googlemetricpb.MetricDescriptor
+ client *monitoring.MetricClient
+ mdLock sync.RWMutex
+ shutdownOnce sync.Once
+}
+
+// ForceFlush does nothing, the exporter holds no state.
+func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() }
+
+// Shutdown shuts down the client connections.
+func (e *metricExporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ close(e.shutdown)
+ err = errors.Join(ctx.Err(), e.client.Close())
+ })
+ return err
+}
+
+// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring.
+func newMetricExporter(o *options) (*metricExporter, error) {
+ if strings.TrimSpace(o.projectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...)
+ ctx := o.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ client, err := monitoring.NewMetricClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if o.compression == "gzip" {
+ client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ }
+
+ cache := map[key]*googlemetricpb.MetricDescriptor{}
+ e := &metricExporter{
+ o: o,
+ mdCache: cache,
+ client: client,
+ shutdown: make(chan struct{}),
+ }
+ return e, nil
+}
+
+var errShutdown = fmt.Errorf("exporter is shutdown")
+
+// Export exports OpenTelemetry Metrics to Google Cloud Monitoring.
+func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ select {
+ case <-me.shutdown:
+ return errShutdown
+ default:
+ }
+
+ if me.o.destinationProjectQuota {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")}))
+ }
+ return errors.Join(
+ me.exportMetricDescriptor(ctx, rm),
+ me.exportTimeSeries(ctx, rm),
+ )
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality {
+ return metric.DefaultTemporalitySelector(ik)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation {
+ return metric.DefaultAggregationSelector(ik)
+}
+
+// exportMetricDescriptor create MetricDescriptor from the record
+// if the descriptor is not registered in Cloud Monitoring yet.
+func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ // We only send metric descriptors if we're configured *and* we're not sending service timeseries.
+ if me.o.disableCreateMetricDescriptors {
+ return nil
+ }
+
+ me.mdLock.Lock()
+ defer me.mdLock.Unlock()
+ mds := make(map[key]*googlemetricpb.MetricDescriptor)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ k := keyOf(metrics, scope.Scope)
+
+ if _, ok := me.mdCache[k]; ok {
+ continue
+ }
+
+ if _, localok := mds[k]; !localok {
+ md := me.recordToMdpb(metrics, extraLabels)
+ mds[k] = md
+ }
+ }
+ }
+
+ // TODO: This process is synchronous and blocks longer time if records in cps
+ // have many different descriptors. In the cps.ForEach above, it should spawn
+ // goroutines to send CreateMetricDescriptorRequest asynchronously in the case
+ // the descriptor does not exist in global cache (me.mdCache).
+ // See details in #26.
+ var errs []error
+ for kmd, md := range mds {
+ err := me.createMetricDescriptorIfNeeded(ctx, md)
+ if err == nil {
+ me.mdCache[kmd] = md
+ }
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error {
+ mdReq := &monitoringpb.GetMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type),
+ }
+ _, err := me.client.GetMetricDescriptor(ctx, mdReq)
+ if err == nil {
+ // If the metric descriptor already exists, skip the CreateMetricDescriptor call.
+ // Metric descriptors cannot be updated without deleting them first, so there
+ // isn't anything we can do here:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify
+ return nil
+ }
+ req := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", me.o.projectID),
+ MetricDescriptor: md,
+ }
+ _, err = me.client.CreateMetricDescriptor(ctx, req)
+ return err
+}
+
+// exportTimeSeries create TimeSeries from the records in cps.
+// res should be the common resource among all TimeSeries, such as instance id, application name and so on.
+func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ tss, err := me.recordsToTspbs(rm)
+ if len(tss) == 0 {
+ return err
+ }
+
+ name := fmt.Sprintf("projects/%s", me.o.projectID)
+
+ errs := []error{err}
+ for i := 0; i < len(tss); i += sendBatchSize {
+ j := i + sendBatchSize
+ if j >= len(tss) {
+ j = len(tss)
+ }
+
+ // TODO: When this exporter is rewritten, support writing to multiple
+ // projects based on the "gcp.project.id" resource.
+ req := &monitoringpb.CreateTimeSeriesRequest{
+ Name: name,
+ TimeSeries: tss[i:j],
+ }
+ if me.o.createServiceTimeSeries {
+ errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req))
+ } else {
+ errs = append(errs, me.client.CreateTimeSeries(ctx, req))
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set {
+ set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter)
+ return &set
+}
+
+// descToMetricType converts descriptor to MetricType proto type.
+// Basically this returns default value ("workload.googleapis.com/[metric type]").
+func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string {
+ if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil {
+ return formatter(desc)
+ }
+ return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name)
+}
+
+// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name.
+func metricTypeToDisplayName(mURL string) string {
+ // strip domain, keep path after domain.
+ u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL))
+ if err != nil || u.Path == "" {
+ return mURL
+ }
+ return strings.TrimLeft(u.Path, "/")
+}
+
+// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor.
+func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor {
+ name := metrics.Name
+ typ := me.descToMetricType(metrics)
+ kind, valueType := recordToMdpbKindType(metrics.Data)
+
+ // Detailed explanations on MetricDescriptor proto is not documented on
+ // generated Go packages. Refer to the original proto file.
+ // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33
+ return &googlemetricpb.MetricDescriptor{
+ Name: name,
+ DisplayName: metricTypeToDisplayName(typ),
+ Type: typ,
+ MetricKind: kind,
+ ValueType: valueType,
+ Unit: string(metrics.Unit),
+ Description: metrics.Description,
+ Labels: labelDescriptors(metrics, extraLabels),
+ }
+}
+
+func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor {
+ labels := []*label.LabelDescriptor{}
+ seenKeys := map[string]struct{}{}
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ // Skip keys that have already been set
+ if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok {
+ continue
+ }
+ labels = append(labels, &label.LabelDescriptor{
+ Key: normalizeLabelKey(string(kv.Key)),
+ })
+ seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{}
+ }
+ }
+ addAttributes(extraLabels)
+ switch a := metrics.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Gauge[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ }
+ return labels
+}
+
+type attributes struct {
+ attrs attribute.Set
+}
+
+func (attrs *attributes) GetString(key string) (string, bool) {
+ value, ok := attrs.attrs.Value(attribute.Key(key))
+ return value.AsString(), ok
+}
+
+// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource
+// proto type for Cloud Monitoring.
+//
+// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors
+func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource {
+ platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey)
+
+ // check if platform mapping is requested and possible
+ if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType {
+ // assemble attributes required to construct this MR
+ attributeMap := make(map[string]string)
+ for expectedLabel := range me.o.monitoredResourceDescription.mrLabels {
+ value, found := res.Set().Value(attribute.Key(expectedLabel))
+ if found {
+ attributeMap[expectedLabel] = value.AsString()
+ }
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: platformMrType.AsString(),
+ Labels: attributeMap,
+ }
+ }
+
+ gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{
+ attrs: attribute.NewSet(res.Attributes()...),
+ })
+ newLabels := make(map[string]string, len(gmr.Labels))
+ for k, v := range gmr.Labels {
+ newLabels[k] = sanitizeUTF8(v)
+ }
+ mr := &monitoredrespb.MonitoredResource{
+ Type: gmr.Type,
+ Labels: newLabels,
+ }
+ return mr
+}
+
+// recordToMdpbKindType return the mapping from OTel's record descriptor to
+// Cloud Monitoring's MetricKind and ValueType.
+func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+ switch agg := a.(type) {
+ case metricdata.Gauge[int64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Gauge[float64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Sum[int64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Sum[float64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+ default:
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+}
+
+// recordToMpb converts data from records to Metric proto type for Cloud Monitoring.
+func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Scope, extraLabels *attribute.Set) *googlemetricpb.Metric {
+ me.mdLock.RLock()
+ defer me.mdLock.RUnlock()
+ k := keyOf(metrics, library)
+ md, ok := me.mdCache[k]
+ if !ok {
+ md = me.recordToMdpb(metrics, extraLabels)
+ }
+
+ labels := make(map[string]string)
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit())
+ }
+ }
+ addAttributes(extraLabels)
+ addAttributes(&attributes)
+
+ return &googlemetricpb.Metric{
+ Type: md.Type,
+ Labels: labels,
+ }
+}
+
+// recordToTspb converts record to TimeSeries proto type with common resource.
+// ref. https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
+func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) {
+ var tss []*monitoringpb.TimeSeries
+ var errs []error
+ if m.Data == nil {
+ return nil, nil
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[int64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Gauge[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[float64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[int64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[int64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[int64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[float64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[float64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[float64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ default:
+ errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()})
+ }
+ return tss, errors.Join(errs...)
+}
+
+func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) {
+ mr := me.resourceToMonitoredResourcepb(rm.Resource)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+
+ var (
+ tss []*monitoringpb.TimeSeries
+ errs []error
+ )
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels)
+ errs = append(errs, err)
+ tss = append(tss, ts...)
+ }
+ }
+
+ return tss, errors.Join(errs...)
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
+
+func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ value, valueType := numberDataPointToValue(point)
+ timestamp := timestamppb.New(point.Time)
+ if err := timestamp.CheckValid(); err != nil {
+ return nil, err
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_GAUGE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: timestamp,
+ },
+ Value: value,
+ }},
+ }, nil
+}
+
+func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ value, valueType := numberDataPointToValue[N](point)
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: value,
+ }},
+ }, nil
+}
+
+// TODO(@dashpole): Refactor to pass control-coupling lint check.
+//
+//nolint:revive
+func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := histToDistribution(point, projectID)
+ if enableSOSD {
+ setSumOfSquaredDeviation(point, distributionValue)
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := expHistToDistribution(point, projectID)
+ // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality.
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) {
+ // The end time of a new interval must be at least a millisecond after the end time of the
+ // previous interval, for all non-gauge types.
+ // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval
+ if end.Sub(start).Milliseconds() <= 1 {
+ end = start.Add(time.Millisecond)
+ }
+ startpb := timestamppb.New(start)
+ endpb := timestamppb.New(end)
+ err := errors.Join(
+ startpb.CheckValid(),
+ endpb.CheckValid(),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &monitoringpb.TimeInterval{
+ StartTime: startpb,
+ EndTime: endpb,
+ }, nil
+}
+
+func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution {
+ counts := make([]int64, len(hist.BucketCounts))
+ for i, v := range hist.BucketCounts {
+ counts[i] = int64(v)
+ }
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: &distribution.Distribution_BucketOptions{
+ Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: hist.Bounds,
+ },
+ },
+ },
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution {
+ // First calculate underflow bucket with all negatives + zeros.
+ underflow := hist.ZeroCount
+ negativeBuckets := hist.NegativeBucket.Counts
+ for i := 0; i < len(negativeBuckets); i++ {
+ underflow += negativeBuckets[i]
+ }
+
+ // Next, pull in remaining buckets.
+ counts := make([]int64, len(hist.PositiveBucket.Counts)+2)
+ bucketOptions := &distribution.Distribution_BucketOptions{}
+ counts[0] = int64(underflow)
+ positiveBuckets := hist.PositiveBucket.Counts
+ for i := 0; i < len(positiveBuckets); i++ {
+ counts[i+1] = int64(positiveBuckets[i])
+ }
+ // Overflow bucket is always empty
+ counts[len(counts)-1] = 0
+
+ if len(hist.PositiveBucket.Counts) == 0 {
+ // We cannot send exponential distributions with no positive buckets,
+ // instead we send a simple overflow/underflow histogram.
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: []float64{0},
+ },
+ }
+ } else {
+ // Exponential histogram
+ growth := math.Exp2(math.Exp2(-float64(hist.Scale)))
+ scale := math.Pow(growth, float64(hist.PositiveBucket.Offset))
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{
+ ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{
+ GrowthFactor: growth,
+ Scale: scale,
+ NumFiniteBuckets: int32(len(counts) - 2),
+ },
+ }
+ }
+
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: bucketOptions,
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar {
+ var exemplars []*distribution.Distribution_Exemplar
+ for _, e := range Exemplars {
+ attachments := []*anypb.Any{}
+ if hasValidSpanContext(e) {
+ sctx, err := anypb.New(&monitoringpb.SpanContext{
+ SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])),
+ })
+ if err == nil {
+ attachments = append(attachments, sctx)
+ }
+ }
+ if len(e.FilteredAttributes) > 0 {
+ attr, err := anypb.New(&monitoringpb.DroppedLabels{
+ Label: attributesToLabels(e.FilteredAttributes),
+ })
+ if err == nil {
+ attachments = append(attachments, attr)
+ }
+ }
+ exemplars = append(exemplars, &distribution.Distribution_Exemplar{
+ Value: float64(e.Value),
+ Timestamp: timestamppb.New(e.Time),
+ Attachments: attachments,
+ })
+ }
+ sort.Slice(exemplars, func(i, j int) bool {
+ return exemplars[i].Value < exemplars[j].Value
+ })
+ return exemplars
+}
+
+func attributesToLabels(attrs []attribute.KeyValue) map[string]string {
+ labels := make(map[string]string, len(attrs))
+ for _, attr := range attrs {
+ labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit())
+ }
+ return labels
+}
+
+var (
+ nilTraceID trace.TraceID
+ nilSpanID trace.SpanID
+)
+
+func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool {
+ return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:])
+}
+
+func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) {
+ var prevBound float64
+ // Calculate the sum of squared deviation.
+ for i := 0; i < len(hist.Bounds); i++ {
+ // Assume all points in the bucket occur at the middle of the bucket range
+ middleOfBucket := (prevBound + hist.Bounds[i]) / 2
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean)
+ prevBound = hist.Bounds[i]
+ }
+ // The infinity bucket is an implicit +Inf bound after the list of explicit bounds.
+ // Assume points in the infinity bucket are at the top of the previous bucket
+ middleOfInfBucket := prevBound
+ if len(dist.BucketCounts) > 0 {
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean)
+ }
+}
+
+func numberDataPointToValue[N int64 | float64](
+ point metricdata.DataPoint[N],
+) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) {
+ switch v := any(point.Value).(type) {
+ case int64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v,
+ }},
+ googlemetricpb.MetricDescriptor_INT64
+ case float64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v,
+ }},
+ googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ // It is impossible to reach this statement
+ return nil, googlemetricpb.MetricDescriptor_INT64
+}
+
+// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149
+//
+// > The label key name must follow:
+// >
+// > * Only upper and lower-case letters, digits and underscores (_) are
+// > allowed.
+// > * Label name must start with a letter or digit.
+// > * The maximum length of a label name is 100 characters.
+//
+// Note: this does not truncate if a label is too long.
+func normalizeLabelKey(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ s = strings.Map(sanitizeRune, s)
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ return s
+}
+
+// converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
new file mode 100644
index 000000000..701b10b10
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
@@ -0,0 +1,201 @@
+// Copyright 2020-2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+ apioption "google.golang.org/api/option"
+)
+
+var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version())
+
+// MonitoredResourceDescription is the struct which holds information required to map OTel resource to specific
+// Google Cloud MonitoredResource.
+type MonitoredResourceDescription struct {
+ mrLabels map[string]struct{}
+ mrType string
+}
+
+// Option is function type that is passed to the exporter initialization function.
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+ // context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Cloud Monitoring
+ // clients, and then every time a new batch of metrics needs to be uploaded.
+ //
+ // If unset, context.Background() will be used.
+ context context.Context
+ // metricDescriptorTypeFormatter is the custom formtter for the MetricDescriptor.Type.
+ // By default, the format string is "workload.googleapis.com/[metric name]".
+ metricDescriptorTypeFormatter func(metricdata.Metrics) string
+ // resourceAttributeFilter determinies which resource attributes to
+ // add to metrics as metric labels. By default, it adds service.name,
+ // service.namespace, and service.instance.id.
+ resourceAttributeFilter attribute.Filter
+ // monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+ // Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+ // monitored resource type.
+ monitoredResourceDescription MonitoredResourceDescription
+ // projectID is the identifier of the Cloud Monitoring
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+ //
+ // It will be used in the project_id label of a Google Cloud Monitoring monitored
+ // resource if the resource does not inherently belong to a specific
+ // project, e.g. on-premise resource like k8s_container or generic_task.
+ projectID string
+ // compression enables gzip compression on gRPC calls.
+ compression string
+ // monitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ monitoringClientOptions []apioption.ClientOption
+ // destinationProjectQuota sets whether the request should use quota from
+ // the destination project for the request.
+ destinationProjectQuota bool
+
+ // disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+ disableCreateMetricDescriptors bool
+
+ // enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+ // deviation. It isn't correct, so we don't send it by default.
+ enableSumOfSquaredDeviation bool
+
+ // createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+ // Implicitly, this sets `disableCreateMetricDescriptors` to true.
+ createServiceTimeSeries bool
+}
+
+// WithProjectID sets Google Cloud Platform project as projectID.
+// Without using this option, it automatically detects the project ID
+// from the default credential detection process.
+// Please find the detailed order of the default credential detection process on the doc:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+ return func(o *options) {
+ o.projectID = id
+ }
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+ return func(o *options) {
+ o.destinationProjectQuota = true
+ }
+}
+
+// WithMonitoringClientOptions add the options for Cloud Monitoring client instance.
+// Available options are defined in.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+ return func(o *options) {
+ o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+ }
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+ return func(o *options) {
+ o.metricDescriptorTypeFormatter = f
+ }
+}
+
+// WithFilteredResourceAttributes determinies which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+ return func(o *options) {
+ o.resourceAttributeFilter = filter
+ }
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+ return (kv.Key == semconv.ServiceNameKey ||
+ kv.Key == semconv.ServiceNamespaceKey ||
+ kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+ return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+ return func(o *options) {
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+ return func(o *options) {
+ o.compression = c
+ }
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+ return func(o *options) {
+ o.enableSumOfSquaredDeviation = true
+ }
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+ return func(o *options) {
+ o.createServiceTimeSeries = true
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+// Google MonitoredResource. The provided mrLabels would be searched for in the OpenTelemetry Resource Attributes and if
+// found, would be included in the MonitoredResource labels.
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) {
+ return func(o *options) {
+ mrLabelSet := make(map[string]struct{})
+ for _, label := range mrLabels {
+ mrLabelSet[label] = struct{}{}
+ }
+ o.monitoredResourceDescription = MonitoredResourceDescription{
+ mrType: mrType,
+ mrLabels: mrLabelSet,
+ }
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
new file mode 100644
index 000000000..47739d0fa
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
@@ -0,0 +1,21 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// Version is the current release version of the OpenTelemetry
+// Operations Metric Exporter in use.
+func Version() string {
+ return "0.51.0"
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
new file mode 100644
index 000000000..510391b82
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
@@ -0,0 +1,285 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resourcemapping
+
+import (
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+const (
+ ProjectIDAttributeKey = "gcp.project.id"
+
+ awsAccount = "aws_account"
+ awsEc2Instance = "aws_ec2_instance"
+ clusterName = "cluster_name"
+ containerName = "container_name"
+ gceInstance = "gce_instance"
+ genericNode = "generic_node"
+ genericTask = "generic_task"
+ instanceID = "instance_id"
+ job = "job"
+ k8sCluster = "k8s_cluster"
+ k8sContainer = "k8s_container"
+ k8sNode = "k8s_node"
+ k8sPod = "k8s_pod"
+ location = "location"
+ namespace = "namespace"
+ namespaceName = "namespace_name"
+ nodeID = "node_id"
+ nodeName = "node_name"
+ podName = "pod_name"
+ region = "region"
+ taskID = "task_id"
+ zone = "zone"
+ gaeInstance = "gae_instance"
+ gaeApp = "gae_app"
+ gaeModuleID = "module_id"
+ gaeVersionID = "version_id"
+ cloudRunRevision = "cloud_run_revision"
+ cloudFunction = "cloud_function"
+ cloudFunctionName = "function_name"
+ serviceName = "service_name"
+ configurationName = "configuration_name"
+ revisionName = "revision_name"
+ bmsInstance = "baremetalsolution.googleapis.com/Instance"
+ unknownServicePrefix = "unknown_service"
+)
+
+var (
+ // monitoredResourceMappings contains mappings of GCM resource label keys onto mapping config from OTel
+ // resource for a given monitored resource type.
+ monitoredResourceMappings = map[string]map[string]struct {
+ // If none of the otelKeys are present in the Resource, fallback to this literal value
+ fallbackLiteral string
+ // OTel resource keys to try and populate the resource label from. For entries with
+ // multiple OTel resource keys, the keys' values will be coalesced in order until there
+ // is a non-empty value.
+ otelKeys []string
+ }{
+ gceInstance: {
+ zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ k8sContainer: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}},
+ },
+ k8sPod: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ },
+ k8sNode: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}},
+ },
+ k8sCluster: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ },
+ gaeInstance: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}},
+ },
+ gaeApp: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ },
+ awsEc2Instance: {
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ region: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ },
+ awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}},
+ },
+ bmsInstance: {
+ location: {otelKeys: []string{string(semconv.CloudRegionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ genericTask: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}},
+ taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}},
+ },
+ genericNode: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}},
+ },
+ }
+)
+
+// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK.
+type ReadOnlyAttributes interface {
+ GetString(string) (string, bool)
+}
+
+// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Logging.
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeApp, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Monitoring
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeInstance, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPComputeEngine.Value.AsString():
+ return createMonitoredResource(gceInstance, attrs)
+ case semconv.CloudPlatformAWSEC2.Value.AsString():
+ return createMonitoredResource(awsEc2Instance, attrs)
+ // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution
+ // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way
+ // into the semconv module.
+ case "gcp_bare_metal_solution":
+ return createMonitoredResource(bmsInstance, attrs)
+ default:
+ // if k8s.cluster.name is set, pattern match for various k8s resources.
+ // this will also match non-cloud k8s platforms like minikube.
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok {
+ // Try for most to least specific k8s_container, k8s_pod, etc
+ if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok {
+ return createMonitoredResource(k8sContainer, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok {
+ return createMonitoredResource(k8sPod, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok {
+ return createMonitoredResource(k8sNode, attrs)
+ }
+ return createMonitoredResource(k8sCluster, attrs)
+ }
+
+ // Fallback to generic_task
+ _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey))
+ _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey))
+ _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey))
+ _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey))
+ if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) {
+ return createMonitoredResource(genericTask, attrs)
+ }
+
+ // Everything else fallback to generic_node
+ return createMonitoredResource(genericNode, attrs)
+ }
+}
+
+func createMonitoredResource(
+ monitoredResourceType string,
+ resourceAttrs ReadOnlyAttributes,
+) *monitoredrespb.MonitoredResource {
+ mappings := monitoredResourceMappings[monitoredResourceType]
+ mrLabels := make(map[string]string, len(mappings))
+
+ for mrKey, mappingConfig := range mappings {
+ mrValue := ""
+ ok := false
+ // Coalesce the possible keys in order
+ for _, otelKey := range mappingConfig.otelKeys {
+ mrValue, ok = resourceAttrs.GetString(otelKey)
+ if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) {
+ break
+ }
+ }
+ if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) {
+ // the service name started with unknown_service, and was ignored above
+ mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey))
+ }
+ if !ok || mrValue == "" {
+ mrValue = mappingConfig.fallbackLiteral
+ }
+ mrLabels[mrKey] = sanitizeUTF8(mrValue)
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: monitoredResourceType,
+ Labels: mrLabels,
+ }
+}
+
+func contains(list []string, element string) bool {
+ for _, item := range list {
+ if item == element {
+ return true
+ }
+ }
+ return false
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index ff499fb66..304edc342 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -39,9 +39,11 @@ var (
)
// semVerRegex is the regular expression used to parse a semantic version.
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+ `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+ `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
// Version represents a single semantic version.
type Version struct {
@@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
}
sv := &Version{
- metadata: m[8],
- pre: m[5],
+ metadata: m[5],
+ pre: m[4],
original: v,
}
@@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[2] != "" {
- sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+ sv.minor, err = strconv.ParseUint(m[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[3] != "" {
- sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+ sv.patch, err = strconv.ParseUint(m[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
func validatePrerelease(p string) error {
eparts := strings.Split(p, ".")
for _, p := range eparts {
- if containsOnly(p, num) {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if containsOnly(p, num) {
if len(p) > 1 && p[0] == '0' {
return ErrSegmentStartsZero
}
@@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
func validateMetadata(m string) error {
eparts := strings.Split(m, ".")
for _, p := range eparts {
- if !containsOnly(p, allowed) {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if !containsOnly(p, allowed) {
return ErrInvalidMetadata
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
new file mode 100644
index 000000000..dd950a286
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+ Value string
+
+ CanExpire bool
+ Expires time.Time
+}
+
+// Expired returns if the token's Expires time is before or equal to the time
+// provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+ if !t.CanExpire {
+ return false
+ }
+ now = now.Round(0)
+ return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides interface for retrieving bearer tokens.
+type TokenProvider interface {
+ RetrieveBearerToken(aws.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(aws.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) {
+ return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+ Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) {
+ return s.Token, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 4818ea427..c483e0cb8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -20,16 +20,16 @@ type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure.
//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(&aws.Config{
-// MaxRetries: aws.Int(3),
-// }))
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"),
-// })
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
@@ -192,6 +192,23 @@ type Config struct {
//
EC2MetadataDisableTimeoutOverride *bool
+ // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1.
+ // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility.
+ // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata
+ // client will return any errors encountered from attempting to fetch a token instead of silently
+ // using the insecure data flow of IMDSv1.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataEnableFallback(false)))
+ //
+ // svc := s3.New(sess)
+ //
+ // See [configuring IMDS] for more information.
+ //
+ // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ EC2MetadataEnableFallback *bool
+
// Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
@@ -283,16 +300,16 @@ type Config struct {
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(aws.NewConfig().
-// WithMaxRetries(3),
-// ))
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, aws.NewConfig().
-// WithRegion("us-west-2"),
-// )
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
func NewConfig() *Config {
return &Config{}
}
@@ -425,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config {
return c
}
+// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseFIPSEndpoint(enable bool) *Config {
+ if enable {
+ c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+ } else {
+ c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled
+ }
+ return c
+}
+
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
@@ -432,6 +460,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
return c
}
+// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config {
+ c.EC2MetadataEnableFallback = &v
+ return c
+}
+
// WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
@@ -576,6 +611,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}
+ if other.EC2MetadataEnableFallback != nil {
+ dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
+ }
+
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
new file mode 100644
index 000000000..140242dd1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
@@ -0,0 +1,4 @@
+// DO NOT EDIT
+package corehandlers
+
+const isAwsInternal = ""
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
index ab69c7a6f..ac842c55d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}
+
+var AddAwsInternal = request.NamedHandler{
+ Name: "core.AddAwsInternal",
+ Fn: func(r *request.Request) {
+ if len(isAwsInternal) == 0 {
+ return
+ }
+ request.AddToUserAgent(r, isAwsInternal)
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
index 785f30d8e..329f788a3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -31,6 +31,8 @@ package endpointcreds
import (
"encoding/json"
+ "fmt"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -69,7 +71,37 @@ type Provider struct {
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
+ //
+ // When constructed from environment, the provider will use the value of
+ // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+ //
+ // Will be overridden if AuthorizationTokenProvider is configured
AuthorizationToken string
+
+ // Optional auth provider func to dynamically load the auth token from a file
+ // everytime a credential is retrieved
+ //
+ // When constructed from environment, the provider will read and use the content
+ // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
+ // as the auth token everytime credentials are retrieved
+ //
+ // Will override AuthorizationToken if configured
+ AuthorizationTokenProvider AuthTokenProvider
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+ GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing AuthTokenProvider interface
+// and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken func retrieves auth token according to TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+ return p()
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error
req := p.Client.NewRequest(op, nil, out)
req.SetContext(ctx)
req.HTTPRequest.Header.Set("Accept", "application/json")
- if authToken := p.AuthorizationToken; len(authToken) != 0 {
+
+ authToken := p.AuthorizationToken
+ var err error
+ if p.AuthorizationTokenProvider != nil {
+ authToken, err = p.AuthorizationTokenProvider.GetToken()
+ if err != nil {
+ return nil, fmt.Errorf("get authorization token: %v", err)
+ }
+ }
+
+ if strings.ContainsAny(authToken, "\r\n") {
+ return nil, fmt.Errorf("authorization token contains invalid newline sequence")
+ }
+ if len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
index e62483600..18694f07f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider))
return credentials.NewCredentials(p)
}
-type credentialProcessResponse struct {
- Version int
- AccessKeyID string `json:"AccessKeyId"`
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+ // As of this writing, the Version key must be set to 1. This might
+ // increment over time as the structure evolves.
+ Version int
+
+ // The access key ID that identifies the temporary security credentials.
+ AccessKeyID string `json:"AccessKeyId"`
+
+ // The secret access key that can be used to sign requests.
SecretAccessKey string
- SessionToken string
- Expiration *time.Time
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ SessionToken string
+
+ // The date on which the current credentials expire.
+ Expiration *time.Time
}
// Retrieve executes the 'credential_process' and returns the credentials.
@@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
}
// Serialize and validate response
- resp := &credentialProcessResponse{}
+ resp := &CredentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
index 6eda2a555..4138e725d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
@@ -4,13 +4,13 @@ import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
- "fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/auth/bearer"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -55,6 +55,19 @@ type Provider struct {
// The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
StartURL string
+
+ // The filepath the cached token will be retrieved from. If unset Provider will
+ // use the startURL to determine the filepath at.
+ //
+ // ~/.aws/sso/cache/.json
+ //
+ // If custom cached token filepath is used, the Provider's startUrl
+ // parameter will be ignored.
+ CachedTokenFilepath string
+
+ // Used by the SSOCredentialProvider if a token configuration
+ // profile is used in the shared config
+ TokenProvider bearer.TokenProvider
}
// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
@@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) {
// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
// by exchanging the accessToken present in ~/.aws/sso/cache.
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- tokenFile, err := loadTokenFile(p.StartURL)
- if err != nil {
- return credentials.Value{}, err
+ var accessToken *string
+ if p.TokenProvider != nil {
+ token, err := p.TokenProvider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return credentials.Value{}, err
+ }
+ accessToken = &token.Value
+ } else {
+ if p.CachedTokenFilepath == "" {
+ cachedTokenFilePath, err := getCachedFilePath(p.StartURL)
+ if err != nil {
+ return credentials.Value{}, err
+ }
+ p.CachedTokenFilepath = cachedTokenFilePath
+ }
+
+ tokenFile, err := loadTokenFile(p.CachedTokenFilepath)
+ if err != nil {
+ return credentials.Value{}, err
+ }
+ accessToken = &tokenFile.AccessToken
}
output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
- AccessToken: &tokenFile.AccessToken,
+ AccessToken: accessToken,
AccountId: &p.AccountID,
RoleName: &p.RoleName,
})
@@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Val
}, nil
}
-func getCacheFileName(url string) (string, error) {
+func getCachedFilePath(startUrl string) (string, error) {
hash := sha1.New()
- _, err := hash.Write([]byte(url))
+ _, err := hash.Write([]byte(startUrl))
if err != nil {
return "", err
}
- return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
-}
-
-type rfc3339 time.Time
-
-func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
- var value string
-
- if err := json.Unmarshal(bytes, &value); err != nil {
- return err
- }
-
- parse, err := time.Parse(time.RFC3339, value)
- if err != nil {
- return fmt.Errorf("expected RFC3339 timestamp: %v", err)
- }
-
- *r = rfc3339(parse)
-
- return nil
+ return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil
}
type token struct {
@@ -153,13 +165,8 @@ func (t token) Expired() bool {
return nowTime().Round(0).After(time.Time(t.ExpiresAt))
}
-func loadTokenFile(startURL string) (t token, err error) {
- key, err := getCacheFileName(startURL)
- if err != nil {
- return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
- }
-
- fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+func loadTokenFile(cachedTokenPath string) (t token, err error) {
+ fileBytes, err := ioutil.ReadFile(cachedTokenPath)
if err != nil {
return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 000000000..f6fa88451
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,237 @@
+package ssocreds
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var resolvedOsUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
+// error if unable get derive the path. Key that will be used to compute a SHA1
+// value that is hex encoded.
+//
+// Derives the filepath using the Key as:
+//
+// ~/.aws/sso/cache/.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+ homeDir := resolvedOsUserHomeDir()
+ if len(homeDir) == 0 {
+ return "", fmt.Errorf("unable to get USER's home directory for cached token")
+ }
+ hash := sha1.New()
+ if _, err := hash.Write([]byte(key)); err != nil {
+ return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err)
+ }
+
+ cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+ return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+ AccessToken string `json:"accessToken,omitempty"`
+ ExpiresAt *rfc3339 `json:"expiresAt,omitempty"`
+
+ RefreshToken string `json:"refreshToken,omitempty"`
+ ClientID string `json:"clientId,omitempty"`
+ ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type cachedToken struct {
+ tokenKnownFields
+ UnknownFields map[string]interface{} `json:"-"`
+}
+
+// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields
+// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal
+// This function adds some extra validation to the known fields and captures unknown fields.
+func (t cachedToken) MarshalJSON() ([]byte, error) {
+ fields := map[string]interface{}{}
+
+ setTokenFieldString(fields, "accessToken", t.AccessToken)
+ setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+ setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+ setTokenFieldString(fields, "clientId", t.ClientID)
+ setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+ for k, v := range t.UnknownFields {
+ if _, ok := fields[k]; ok {
+ return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+ }
+ fields[k] = v
+ }
+
+ return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+ if value == "" {
+ return
+ }
+ fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+ if value == nil {
+ return
+ }
+ fields[key] = value
+}
+
+// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified
+// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal
+// This function adds some extra validation to the known fields and captures unknown fields.
+func (t *cachedToken) UnmarshalJSON(b []byte) error {
+ var fields map[string]interface{}
+ if err := json.Unmarshal(b, &fields); err != nil {
+ return nil
+ }
+
+ t.UnknownFields = map[string]interface{}{}
+
+ for k, v := range fields {
+ var err error
+ switch k {
+ case "accessToken":
+ err = getTokenFieldString(v, &t.AccessToken)
+ case "expiresAt":
+ err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+ case "refreshToken":
+ err = getTokenFieldString(v, &t.RefreshToken)
+ case "clientId":
+ err = getTokenFieldString(v, &t.ClientID)
+ case "clientSecret":
+ err = getTokenFieldString(v, &t.ClientSecret)
+ default:
+ t.UnknownFields[k] = v
+ }
+
+ if err != nil {
+ return fmt.Errorf("field %q, %v", k, err)
+ }
+ }
+
+ return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+ var ok bool
+ *value, ok = v.(string)
+ if !ok {
+ return fmt.Errorf("expect value to be string, got %T", v)
+ }
+ return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+ var stringValue string
+ if err := getTokenFieldString(v, &stringValue); err != nil {
+ return err
+ }
+
+ timeValue, err := parseRFC3339(stringValue)
+ if err != nil {
+ return err
+ }
+
+ *value = &timeValue
+ return nil
+}
+
+func loadCachedToken(filename string) (cachedToken, error) {
+ fileBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err)
+ }
+
+ var t cachedToken
+ if err := json.Unmarshal(fileBytes, &t); err != nil {
+ return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err)
+ }
+
+ if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
+ return cachedToken{}, fmt.Errorf(
+ "cached SSO token must contain accessToken and expiresAt fields")
+ }
+
+ return t, nil
+}
+
+func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) {
+ tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10)
+ if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+ return err
+ }
+
+ if err := os.Rename(tmpFilename, filename); err != nil {
+ return fmt.Errorf("failed to replace old cached SSO token file, %v", err)
+ }
+
+ return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) {
+ var f *os.File
+ f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+ if err != nil {
+ return fmt.Errorf("failed to create cached SSO token file %v", err)
+ }
+
+ defer func() {
+ closeErr := f.Close()
+ if err == nil && closeErr != nil {
+ err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr)
+ }
+ }()
+
+ encoder := json.NewEncoder(f)
+
+ if err = encoder.Encode(t); err != nil {
+ return fmt.Errorf("failed to serialize cached SSO token, %v", err)
+ }
+
+ return nil
+}
+
+type rfc3339 time.Time
+
+// UnmarshalJSON decode rfc3339 from JSON format
+func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
+ var value string
+ var err error
+
+ if err = json.Unmarshal(bytes, &value); err != nil {
+ return err
+ }
+
+ *r, err = parseRFC3339(value)
+ return err
+}
+
+func parseRFC3339(v string) (rfc3339, error) {
+ parsed, err := time.Parse(time.RFC3339, v)
+ if err != nil {
+ return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err)
+ }
+
+ return rfc3339(parsed), nil
+}
+
+// MarshalJSON encode rfc3339 to JSON format time
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+ value := time.Time(*r).Format(time.RFC3339)
+
+ // Use JSON unmarshal to unescape the quoted value making use of JSON's
+ // quoting rules.
+ return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
new file mode 100644
index 000000000..3388b78b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
@@ -0,0 +1,148 @@
+package ssocreds
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/auth/bearer"
+ "github.com/aws/aws-sdk-go/service/ssooidc"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+ CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+ // Client that can be overridden
+ Client CreateTokenAPIClient
+
+ // The path the file containing the cached SSO token will be read from.
+ // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter.
+ CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The initial SSO create token should be preformed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+ options SSOTokenProviderOptions
+}
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the SSO token cached stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+ options := SSOTokenProviderOptions{
+ Client: client,
+ CachedTokenFilepath: cachedTokenFilepath,
+ }
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ provider := &SSOTokenProvider{
+ options: options,
+ }
+
+ return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) {
+ cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+ if err != nil {
+ return bearer.Token{}, err
+ }
+
+ if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+ cachedToken, err = p.refreshToken(cachedToken)
+ if err != nil {
+ return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err)
+ }
+ }
+
+ expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt))
+ return bearer.Token{
+ Value: cachedToken.AccessToken,
+ CanExpire: !expiresAt.IsZero(),
+ Expires: expiresAt,
+ }, nil
+}
+
+func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) {
+ if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" {
+ return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+ }
+
+ createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{
+ ClientId: &token.ClientID,
+ ClientSecret: &token.ClientSecret,
+ RefreshToken: &token.RefreshToken,
+ GrantType: aws.String("refresh_token"),
+ })
+ if err != nil {
+ return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err)
+ }
+ if createResult.ExpiresIn == nil {
+ return cachedToken{}, fmt.Errorf("missing required field ExpiresIn")
+ }
+ if createResult.AccessToken == nil {
+ return cachedToken{}, fmt.Errorf("missing required field AccessToken")
+ }
+ if createResult.RefreshToken == nil {
+ return cachedToken{}, fmt.Errorf("missing required field RefreshToken")
+ }
+
+ expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second)
+
+ token.AccessToken = *createResult.AccessToken
+ token.ExpiresAt = (*rfc3339)(&expiresAt)
+ token.RefreshToken = *createResult.RefreshToken
+
+ fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+ if err != nil {
+ return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err)
+ }
+
+ if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil {
+ return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err)
+ }
+
+ return token, nil
+}
+
+func toTime(p *time.Time) (v time.Time) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 260a37cbb..86db488de 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -9,7 +9,7 @@ to refresh the credentials will be synchronized. But, the SDK is unable to
ensure synchronous usage of the AssumeRoleProvider if the value is shared
between multiple Credentials, Sessions or service clients.
-Assume Role
+# Assume Role
To assume an IAM role using STS with the SDK you can create a new Credentials
with the SDKs's stscreds package.
@@ -27,7 +27,7 @@ with the SDKs's stscreds package.
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
-Assume Role with static MFA Token
+# Assume Role with static MFA Token
To assume an IAM role with a MFA token you can either specify a MFA token code
directly or provide a function to prompt the user each time the credentials
@@ -49,7 +49,7 @@ credentials.
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
-Assume Role with MFA Token Provider
+# Assume Role with MFA Token Provider
To assume an IAM role with MFA for longer running tasks where the credentials
may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
@@ -74,7 +74,6 @@ single Credentials with an AssumeRoleProvider can be shared safely.
// Create service client value configured for credentials
// from assumed role.
svc := s3.New(sess, &aws.Config{Credentials: creds})
-
*/
package stscreds
@@ -199,6 +198,10 @@ type AssumeRoleProvider struct {
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
SerialNumber *string
+ // The SourceIdentity which is used to identity a persistent identity through the whole session.
+ // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ SourceIdentity *string
+
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
@@ -320,6 +323,7 @@ func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (crede
Tags: p.Tags,
PolicyArns: p.PolicyArns,
TransitiveTagKeys: p.TransitiveTagKeys,
+ SourceIdentity: p.SourceIdentity,
}
if p.Policy != nil {
input.Policy = p.Policy
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 23bb639e0..1ba80b576 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,6 +9,7 @@ package defaults
import (
"fmt"
+ "io/ioutil"
"net"
"net/http"
"net/url"
@@ -74,6 +75,7 @@ func Handlers() request.Handlers {
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@@ -114,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+ 0xFD, 0, 0xE, 0xC2,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0x23,
+}
+
// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
@@ -134,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
var lookupHostFn = net.LookupHost
-func isLoopbackHost(host string) (bool, error) {
- ip := net.ParseIP(host)
- if ip != nil {
- return ip.IsLoopback(), nil
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
+//
+// host can either be an IP address OR an unresolved hostname - resolution will
+// be automatically performed in the latter case
+func isAllowedHost(host string) (bool, error) {
+ if ip := net.ParseIP(host); ip != nil {
+ return isIPAllowed(ip), nil
}
- // Host is not an ip, perform lookup
addrs, err := lookupHostFn(host)
if err != nil {
return false, err
}
+
for _, addr := range addrs {
- if !net.ParseIP(addr).IsLoopback() {
+ if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
return false, nil
}
}
@@ -154,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) {
return true, nil
}
+func isIPAllowed(ip net.IP) bool {
+ return ip.IsLoopback() ||
+ ip.Equal(ecsContainerIPv4) ||
+ ip.Equal(eksContainerIPv4) ||
+ ip.Equal(eksContainerIPv6)
+}
+
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
var errMsg string
@@ -164,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string)
host := aws.URLHostname(parsed)
if len(host) == 0 {
errMsg = "unable to parse host from local HTTP cred provider URL"
- } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
- errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
- } else if !isLoopback {
- errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ } else if parsed.Scheme == "http" {
+ if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
+ } else if !isAllowedHost {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
+ }
}
}
@@ -189,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+ p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+ if contents, err := ioutil.ReadFile(authFilePath); err != nil {
+ return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+ } else {
+ return string(contents), nil
+ }
+ })
+ }
},
)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
index df63bade1..f4cc8751d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -57,13 +57,13 @@ type EC2Metadata struct {
// New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines.
//
-//
// Example:
-// // Create a EC2Metadata client from just a session.
-// svc := ec2metadata.New(mySession)
//
-// // Create a EC2Metadata client with additional configuration
-// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 4b29f190b..f1f9ba4ec 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -1,6 +1,8 @@
package ec2metadata
import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
"net/http"
"sync/atomic"
"time"
@@ -33,11 +35,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
return &tokenProvider{client: c, configuredTTL: duration}
}
+// check if fallback is enabled
+func (t *tokenProvider) fallbackEnabled() bool {
+ return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
+}
+
// fetchTokenHandler fetches token for EC2Metadata service client by default.
func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
-
// short-circuits to insecure data flow if tokenProvider is disabled.
- if v := atomic.LoadUint32(&t.disabled); v == 1 {
+ if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
return
}
@@ -49,23 +55,23 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
output, err := t.client.getToken(r.Context(), t.configuredTTL)
if err != nil {
+ // only attempt fallback to insecure data flow if IMDSv1 is enabled
+ if !t.fallbackEnabled() {
+ r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
+ return
+ }
- // change the disabled flag on token provider to true,
- // when error is request timeout error.
+ // change the disabled flag on token provider to true and fallback
if requestFailureError, ok := err.(awserr.RequestFailure); ok {
switch requestFailureError.StatusCode() {
case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
atomic.StoreUint32(&t.disabled, 1)
+ if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
+ t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+ }
case http.StatusBadRequest:
r.Error = requestFailureError
}
-
- // Check if request timed out while waiting for response
- if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
- if e.Code() == request.ErrCodeRequestError {
- atomic.StoreUint32(&t.disabled, 1)
- }
- }
}
return
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 4df415be5..c3516e018 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -13,6 +13,8 @@ const (
AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition.
AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition.
+ AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition.
+ AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition.
)
// AWS Standard partition's regions.
@@ -23,16 +25,22 @@ const (
ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka).
ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad).
ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
+ ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ CaWest1RegionID = "ca-west-1" // Canada West (Calgary).
EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
+ EuCentral2RegionID = "eu-central-2" // Europe (Zurich).
EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
EuSouth1RegionID = "eu-south-1" // Europe (Milan).
+ EuSouth2RegionID = "eu-south-2" // Europe (Spain).
EuWest1RegionID = "eu-west-1" // Europe (Ireland).
EuWest2RegionID = "eu-west-2" // Europe (London).
EuWest3RegionID = "eu-west-3" // Europe (Paris).
+ IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv).
MeCentral1RegionID = "me-central-1" // Middle East (UAE).
MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
@@ -65,8 +73,16 @@ const (
UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
)
+// AWS ISOE (Europe) partition's regions.
+const (
+ EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West.
+)
+
+// AWS ISOF partition's regions.
+const ()
+
// DefaultResolver returns an Endpoint resolver that will be able
-// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
//
// Use DefaultPartitions() to get the list of the default partitions.
func DefaultResolver() Resolver {
@@ -74,7 +90,7 @@ func DefaultResolver() Resolver {
}
// DefaultPartitions returns a list of the partitions the SDK is bundled
-// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
//
// partitions := endpoints.DefaultPartitions
// for _, p := range partitions {
@@ -90,6 +106,8 @@ var defaultPartitions = partitions{
awsusgovPartition,
awsisoPartition,
awsisobPartition,
+ awsisoePartition,
+ awsisofPartition,
}
// AwsPartition returns the Resolver for AWS Standard.
@@ -103,7 +121,7 @@ var awsPartition = partition{
DNSSuffix: "amazonaws.com",
RegionRegex: regionRegex{
Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$")
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$")
return reg
}(),
},
@@ -157,6 +175,9 @@ var awsPartition = partition{
"ap-south-1": region{
Description: "Asia Pacific (Mumbai)",
},
+ "ap-south-2": region{
+ Description: "Asia Pacific (Hyderabad)",
+ },
"ap-southeast-1": region{
Description: "Asia Pacific (Singapore)",
},
@@ -166,18 +187,30 @@ var awsPartition = partition{
"ap-southeast-3": region{
Description: "Asia Pacific (Jakarta)",
},
+ "ap-southeast-4": region{
+ Description: "Asia Pacific (Melbourne)",
+ },
"ca-central-1": region{
Description: "Canada (Central)",
},
+ "ca-west-1": region{
+ Description: "Canada West (Calgary)",
+ },
"eu-central-1": region{
Description: "Europe (Frankfurt)",
},
+ "eu-central-2": region{
+ Description: "Europe (Zurich)",
+ },
"eu-north-1": region{
Description: "Europe (Stockholm)",
},
"eu-south-1": region{
Description: "Europe (Milan)",
},
+ "eu-south-2": region{
+ Description: "Europe (Spain)",
+ },
"eu-west-1": region{
Description: "Europe (Ireland)",
},
@@ -187,6 +220,9 @@ var awsPartition = partition{
"eu-west-3": region{
Description: "Europe (Paris)",
},
+ "il-central-1": region{
+ Description: "Israel (Tel Aviv)",
+ },
"me-central-1": region{
Description: "Middle East (UAE)",
},
@@ -210,13 +246,6 @@ var awsPartition = partition{
},
},
Services: services{
- "a4b": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
"access-analyzer": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -237,6 +266,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -246,6 +278,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -255,15 +290,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -282,6 +332,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -318,6 +377,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -399,6 +461,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -408,6 +473,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -426,15 +494,39 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "acm-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "acm-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -444,6 +536,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -552,6 +647,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -561,6 +659,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -570,15 +671,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -597,6 +713,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -633,6 +758,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -677,32 +808,69 @@ var awsPartition = partition{
},
},
},
+ "agreement-marketplace": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
"airflow": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -712,6 +880,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -721,6 +898,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -737,6 +917,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -795,6 +978,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -813,6 +999,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -850,6 +1039,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -868,6 +1060,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -897,6 +1092,49 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "aoss": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"api.detective": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -928,6 +1166,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -946,6 +1199,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -1084,6 +1340,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "api.ecr.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -1108,6 +1372,14 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "api.ecr.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -1116,6 +1388,14 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "api.ecr.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "dkr-us-east-1",
}: endpoint{
@@ -1196,6 +1476,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "api.ecr.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -1212,6 +1500,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "api.ecr.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -1308,6 +1604,14 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "api.ecr.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{
@@ -1402,6 +1706,26 @@ var awsPartition = partition{
},
},
},
+ "api.ecr-public": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
"api.elastic-inference": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -1593,6 +1917,14 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "api.iotwireless.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -1601,6 +1933,14 @@ var awsPartition = partition{
Region: "eu-west-1",
},
},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "api.iotwireless.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{
@@ -1621,24 +1961,60 @@ var awsPartition = partition{
},
"api.mediatailor": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -1656,6 +2032,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -1689,6 +2068,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -1698,18 +2080,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -1719,6 +2113,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -1896,6 +2296,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -1960,6 +2363,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -1969,6 +2375,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -1978,15 +2387,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "apigateway-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2005,6 +2429,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -2041,6 +2474,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2142,6 +2578,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -2151,18 +2590,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2172,6 +2623,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2215,24 +2669,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2242,6 +2714,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2297,21 +2775,81 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-2.amazonaws.com",
+ },
},
},
"application-autoscaling": service{
@@ -2339,6 +2877,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -2348,18 +2889,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2369,6 +2922,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -2412,24 +2968,36 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2439,6 +3007,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2551,6 +3122,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -2605,6 +3197,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.eu-west-3.api.aws",
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2632,6 +3233,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.us-east-1.api.aws",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
@@ -2641,6 +3263,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.us-east-2.api.aws",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "appmesh-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
@@ -2650,6 +3293,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.us-west-1.api.aws",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -2659,6 +3323,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.us-west-2.api.aws",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "appmesh-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"apprunner": service{
@@ -2666,9 +3351,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -2771,6 +3474,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -2814,6 +3520,9 @@ var awsPartition = partition{
},
"appsync": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -2829,24 +3538,39 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -2856,6 +3580,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -2886,30 +3616,133 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -2920,51 +3753,192 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.af-south-1.api.aws",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-east-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-northeast-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-northeast-2.api.aws",
+ },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-northeast-3.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-southeast-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-central-2.api.aws",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-north-1.api.aws",
+ },
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-south-2.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-west-2.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.eu-west-3.api.aws",
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -3001,48 +3975,126 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.me-central-1.api.aws",
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.me-south-1.api.aws",
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.sa-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-east-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-west-2.api.aws",
+ },
},
},
"auditmanager": service{
@@ -3074,15 +4126,75 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"autoscaling": service{
@@ -3110,6 +4222,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3119,18 +4234,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -3140,6 +4279,63 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -3152,15 +4348,39 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+ },
},
},
"autoscaling-plans": service{
@@ -3258,6 +4478,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3267,18 +4490,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -3288,6 +4523,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -3375,22 +4616,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "backupstorage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
"batch": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -3419,6 +4644,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3428,18 +4656,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -3485,6 +4725,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -3529,51 +4775,385 @@ var awsPartition = partition{
},
},
},
- "billingconductor": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "billingconductor.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "braket": service{
+ "bedrock": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "eu-west-2",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-southeast-2",
}: endpoint{},
- },
- },
- "budgets": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "aws-global",
+ Region: "bedrock-ap-northeast-1",
}: endpoint{
- Hostname: "budgets.amazonaws.com",
+ Hostname: "bedrock.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ap-northeast-1",
},
},
- },
- },
- "cassandra": service{
+ endpointKey{
+ Region: "bedrock-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ap-southeast-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-central-1",
+ }: endpoint{
+ Hostname: "bedrock.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-2",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-3",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-northeast-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-southeast-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-3",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-sa-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-sa-east-1",
+ }: endpoint{
+ Hostname: "bedrock.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "billingconductor": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "billingconductor.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "braket": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cases": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ },
+ },
+ "cassandra": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-east-1",
@@ -3702,133 +5282,166 @@ var awsPartition = partition{
},
},
},
- "cloud9": service{
+ "cleanrooms": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "cloudcontrolapi": service{
+ "cloud9": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-3",
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
+ Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
+ Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
@@ -3837,7 +5450,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
+ Hostname: "cloud9-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -3846,7 +5459,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
+ Hostname: "cloud9-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -3855,7 +5468,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
+ Hostname: "cloud9-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -3864,107 +5477,556 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
+ Hostname: "cloud9-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
+ Hostname: "cloud9-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
+ Hostname: "cloud9-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-2.api.aws",
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
+ Hostname: "cloud9-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
+ Hostname: "cloud9-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-2.api.aws",
},
},
},
- "clouddirectory": service{
+ "cloudcontrolapi": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
+ Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.af-south-1.api.aws",
+ },
endpointKey{
- Region: "eu-central-1",
+ Region: "ap-east-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-east-1.api.aws",
+ },
endpointKey{
- Region: "eu-west-2",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-1.api.aws",
+ },
endpointKey{
- Region: "us-east-2",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-2.api.aws",
+ },
endpointKey{
- Region: "af-south-1",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-3.api.aws",
+ },
endpointKey{
- Region: "ap-northeast-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-south-1.api.aws",
+ },
endpointKey{
- Region: "ap-northeast-3",
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-2.api.aws",
+ },
+ },
+ },
+ "clouddirectory": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "cloudformation": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3974,18 +6036,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -3995,6 +6069,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -4127,6 +6204,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -4142,6 +6222,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -4157,6 +6240,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -4231,6 +6320,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -4240,18 +6332,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -4297,6 +6401,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -4344,6 +6451,79 @@ var awsPartition = partition{
},
},
},
+ "cloudtrail-data": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"codeartifact": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -4407,6 +6587,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -4416,18 +6599,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -4437,6 +6629,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -4517,6 +6715,17 @@ var awsPartition = partition{
},
},
},
+ "codecatalyst": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "codecatalyst.global.api.aws",
+ },
+ },
+ },
"codecommit": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -4537,12 +6746,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -4588,6 +6803,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -4688,6 +6909,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -4697,18 +6921,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -4718,6 +6954,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -4837,6 +7076,9 @@ var awsPartition = partition{
},
"codepipeline": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -4846,15 +7088,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -4867,12 +7121,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -4927,6 +7187,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -5013,6 +7282,64 @@ var awsPartition = partition{
},
"codestar-connections": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "codestar-notifications": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -5046,6 +7373,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -5065,30 +7395,60 @@ var awsPartition = partition{
},
"cognito-identity": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -5116,6 +7476,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@@ -5125,6 +7494,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -5152,6 +7527,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cognito-identity-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -5165,30 +7546,60 @@ var awsPartition = partition{
},
"cognito-idp": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -5234,6 +7645,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -5413,12 +7830,27 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -5525,6 +7957,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -5541,6 +7981,22 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -5557,6 +8013,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -5573,6 +8037,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -5597,6 +8069,22 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "compute-optimizer.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "compute-optimizer.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -5667,6 +8155,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -5676,18 +8167,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -5733,6 +8236,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -5806,12 +8312,42 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "connect-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "connect-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "connect-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "connect-fips.us-west-2.amazonaws.com",
+ },
},
},
"connect-campaigns": service{
@@ -5819,15 +8355,51 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
+ },
},
},
"contact-lens": service{
@@ -5838,6 +8410,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
@@ -5860,21 +8438,39 @@ var awsPartition = partition{
},
"controltower": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -5893,12 +8489,39 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -5908,6 +8531,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -5947,6 +8579,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -5967,6 +8617,18 @@ var awsPartition = partition{
},
},
},
+ "cost-optimization-hub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "cost-optimization-hub.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"cur": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -6071,6 +8733,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -6476,6 +9141,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -6485,6 +9153,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -6494,15 +9165,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "datasync-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6521,6 +9207,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -6557,6 +9252,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -6601,6 +9302,190 @@ var awsPartition = partition{
},
},
},
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "datazone.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "datazone.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "datazone.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "datazone.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "datazone.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "datazone.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "datazone.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "datazone.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "datazone.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "datazone.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "datazone.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "datazone.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "datazone.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "datazone.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "datazone.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "datazone.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "datazone.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "datazone.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "datazone.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "datazone.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "datazone.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "datazone.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"dax": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -6618,6 +9503,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6676,6 +9567,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -6691,6 +9588,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -6709,6 +9615,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@@ -6742,6 +9657,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -6773,6 +9694,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -6782,18 +9706,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6803,6 +9751,24 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -6839,6 +9805,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -6931,6 +9900,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -6940,18 +9912,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6961,6 +9945,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -7001,6 +9991,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7010,9 +10003,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "dms",
}: endpoint{
@@ -7043,12 +10042,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7058,6 +10063,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -7278,71 +10286,7 @@ var awsPartition = partition{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
+ Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
@@ -7354,23 +10298,26 @@ var awsPartition = partition{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- },
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7380,19 +10327,10 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
+ Hostname: "drs-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -7401,7 +10339,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
+ Hostname: "drs-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -7410,7 +10348,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
+ Hostname: "drs-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -7419,12 +10357,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
+ Hostname: "drs-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -7438,7 +10382,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
+ Hostname: "drs-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -7447,7 +10391,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
+ Hostname: "drs-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -7456,7 +10400,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
+ Hostname: "drs-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -7465,16 +10409,11 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
+ Hostname: "drs-fips.us-west-2.amazonaws.com",
},
},
},
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
+ "ds": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -7494,6 +10433,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7504,173 +10446,31 @@ var awsPartition = partition{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "local",
- }: endpoint{
- Hostname: "localhost:8000",
- Protocols: []string{"http"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-2",
+ Region: "ca-west-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ Hostname: "ds-fips.ca-west-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
@@ -7678,6 +10478,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7690,16 +10493,25 @@ var awsPartition = partition{
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "ebs-fips.us-east-1.amazonaws.com",
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -7708,7 +10520,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "ebs-fips.us-east-2.amazonaws.com",
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -7717,7 +10529,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "ebs-fips.us-west-1.amazonaws.com",
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -7726,12 +10538,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "ebs-fips.us-west-2.amazonaws.com",
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -7745,7 +10563,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ebs-fips.us-east-1.amazonaws.com",
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -7754,7 +10572,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ebs-fips.us-east-2.amazonaws.com",
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -7763,7 +10581,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ebs-fips.us-west-1.amazonaws.com",
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -7772,11 +10590,11 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ebs-fips.us-west-2.amazonaws.com",
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
},
},
},
- "ec2": service{
+ "dynamodb": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
Protocols: []string{"http", "https"},
@@ -7802,11 +10620,8 @@ var awsPartition = partition{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.ap-south-1.api.aws",
- },
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7816,6 +10631,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -7823,11 +10641,41 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -7835,14 +10683,11 @@ var awsPartition = partition{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.eu-west-1.api.aws",
- },
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@@ -7850,49 +10695,16 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ec2-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "local",
}: endpoint{
- Hostname: "ec2-fips.us-east-1.amazonaws.com",
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
CredentialScope: credentialScope{
Region: "us-east-1",
},
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ec2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ec2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ec2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
},
endpointKey{
Region: "me-central-1",
@@ -7903,41 +10715,41 @@ var awsPartition = partition{
endpointKey{
Region: "sa-east-1",
}: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.sa-east-1.api.aws",
- },
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
- Variant: dualStackVariant,
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "ec2.us-east-1.api.aws",
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "us-east-1-fips",
}: endpoint{
- Hostname: "ec2-fips.us-east-1.amazonaws.com",
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
- Variant: dualStackVariant,
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "ec2.us-east-2.api.aws",
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "us-east-2-fips",
}: endpoint{
- Hostname: "ec2-fips.us-east-2.amazonaws.com",
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
@@ -7946,26 +10758,38 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ec2-fips.us-west-1.amazonaws.com",
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
- Variant: dualStackVariant,
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "ec2.us-west-2.api.aws",
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
},
endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "us-west-2-fips",
}: endpoint{
- Hostname: "ec2-fips.us-west-2.amazonaws.com",
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
},
},
- "ecs": service{
+ "ebs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -7985,6 +10809,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7994,18 +10821,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8015,10 +10866,28 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ebs-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "ecs-fips.us-east-1.amazonaws.com",
+ Hostname: "ebs-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -8027,7 +10896,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "ecs-fips.us-east-2.amazonaws.com",
+ Hostname: "ebs-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -8036,7 +10905,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "ecs-fips.us-west-1.amazonaws.com",
+ Hostname: "ebs-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -8045,12 +10914,15 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "ecs-fips.us-west-2.amazonaws.com",
+ Hostname: "ebs-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8067,7 +10939,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ecs-fips.us-east-1.amazonaws.com",
+ Hostname: "ebs-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -8076,7 +10948,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ecs-fips.us-east-2.amazonaws.com",
+ Hostname: "ebs-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -8085,7 +10957,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ecs-fips.us-west-1.amazonaws.com",
+ Hostname: "ebs-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -8094,43 +10966,15 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ecs-fips.us-west-2.amazonaws.com",
+ Hostname: "ebs-fips.us-west-2.amazonaws.com",
},
},
},
- "edge.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "eks": service{
+ "ec2": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
Protocols: []string{"http", "https"},
},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.{region}.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
},
Endpoints: serviceEndpoints{
endpointKey{
@@ -8151,6 +10995,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8160,31 +11013,79 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ec2-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ec2-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ec2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "fips.eks.us-east-1.amazonaws.com",
+ Hostname: "ec2-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -8193,7 +11094,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "fips.eks.us-east-2.amazonaws.com",
+ Hostname: "ec2-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -8202,7 +11103,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "fips.eks.us-west-1.amazonaws.com",
+ Hostname: "ec2-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -8211,35 +11112,59 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "fips.eks.us-west-2.amazonaws.com",
+ Hostname: "ec2-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.sa-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.eks.us-east-1.amazonaws.com",
+ Hostname: "ec2-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.eks.us-east-2.amazonaws.com",
+ Hostname: "ec2-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -8248,20 +11173,26 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.eks.us-west-1.amazonaws.com",
+ Hostname: "ec2-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fips.eks.us-west-2.amazonaws.com",
+ Hostname: "ec2-fips.us-west-2.amazonaws.com",
},
},
},
- "elasticache": service{
+ "ecs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -8281,6 +11212,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8290,18 +11224,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8312,14 +11258,44 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ Hostname: "ecs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ecs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ecs-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ecs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8336,16 +11312,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticache-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ecs-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -8354,16 +11321,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticache-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "ecs-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -8372,16 +11330,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ecs-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -8390,20 +11339,44 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticache-fips.us-west-2.amazonaws.com",
+ Hostname: "ecs-fips.us-west-2.amazonaws.com",
},
+ },
+ },
+ "edge.sagemaker": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-west-2-fips",
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "eks": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ defaultKey{
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticache-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "fips.eks.{region}.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
},
},
- },
- "elasticbeanstalk": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -8423,6 +11396,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -8432,18 +11408,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8456,7 +11444,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+ Hostname: "fips.eks.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -8465,7 +11453,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+ Hostname: "fips.eks.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -8474,7 +11462,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+ Hostname: "fips.eks.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -8483,12 +11471,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+ Hostname: "fips.eks.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -8502,7 +11496,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
+ Hostname: "fips.eks.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -8511,7 +11505,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
+ Hostname: "fips.eks.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -8520,7 +11514,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
+ Hostname: "fips.eks.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -8529,372 +11523,256 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
+ Hostname: "fips.eks.us-west-2.amazonaws.com",
},
},
},
- "elasticfilesystem": service{
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+ Hostname: "eks-auth.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+ Hostname: "eks-auth.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+ Hostname: "eks-auth.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+ Hostname: "eks-auth.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+ Hostname: "eks-auth.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "eks-auth.ap-south-1.api.aws",
+ },
endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
+ Region: "ap-south-2",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+ Hostname: "eks-auth.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+ Hostname: "eks-auth.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+ Hostname: "eks-auth.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "eks-auth.ap-southeast-3.api.aws",
+ },
endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
+ Region: "ap-southeast-4",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
+ Hostname: "eks-auth.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "eks-auth.ca-central-1.api.aws",
+ },
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
+ Region: "ca-west-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+ Hostname: "eks-auth.ca-west-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "eks-auth.eu-central-1.api.aws",
+ },
endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
+ Region: "eu-central-2",
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+ Hostname: "eks-auth.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+ Hostname: "eks-auth.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "eks-auth.eu-south-1.api.aws",
+ },
endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
+ Region: "eu-south-2",
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+ Hostname: "eks-auth.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+ Hostname: "eks-auth.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+ Hostname: "eks-auth.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+ Hostname: "eks-auth.eu-west-3.api.aws",
},
endpointKey{
- Region: "fips-af-south-1",
+ Region: "il-central-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.il-central-1.api.aws",
},
endpointKey{
- Region: "fips-ap-east-1",
+ Region: "me-central-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.me-central-1.api.aws",
},
endpointKey{
- Region: "fips-ap-northeast-1",
+ Region: "me-south-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.me-south-1.api.aws",
},
endpointKey{
- Region: "fips-ap-northeast-2",
+ Region: "sa-east-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.sa-east-1.api.aws",
},
endpointKey{
- Region: "fips-ap-northeast-3",
+ Region: "us-east-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.us-east-1.api.aws",
},
endpointKey{
- Region: "fips-ap-south-1",
+ Region: "us-east-2",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.us-east-2.api.aws",
},
endpointKey{
- Region: "fips-ap-southeast-1",
+ Region: "us-west-1",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.us-west-1.api.aws",
},
endpointKey{
- Region: "fips-ap-southeast-2",
+ Region: "us-west-2",
}: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "eks-auth.us-west-2.api.aws",
},
+ },
+ },
+ "elasticache": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "fips-ap-southeast-3",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
- },
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-northeast-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-north-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-northeast-2",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-southeast-2",
+ }: endpoint{},
endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
}: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
- },
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
- },
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -8902,7 +11780,16 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticache-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "elasticache-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
@@ -8911,7 +11798,16 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticache-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "elasticache-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
@@ -8920,7 +11816,16 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
@@ -8929,16 +11834,20 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticache-fips.us-west-2.amazonaws.com",
},
- },
- },
- "elasticloadbalancing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "elasticache-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
},
+ },
+ "elasticbeanstalk": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -8991,7 +11900,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -9000,7 +11909,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -9009,7 +11918,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -9018,14 +11927,14 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-central-1",
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
@@ -9040,7 +11949,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -9049,7 +11958,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -9058,7 +11967,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -9067,45 +11976,111 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
},
},
},
- "elasticmapreduce": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "{region}.{service}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
+ "elasticfilesystem": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -9113,299 +12088,318 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: fipsVariant,
}: endpoint{
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
+ Region: "eu-west-3",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+ },
+ endpointKey{
+ Region: "fips-af-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "af-south-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "fips-ap-east-1",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ap-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "fips-ap-northeast-1",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "ap-northeast-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "fips-ap-northeast-2",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "ap-northeast-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "fips-ap-northeast-3",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ap-northeast-3",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "fips-ap-south-1",
}: endpoint{
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-ap-south-2",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "fips-ap-southeast-1",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
+ Region: "fips-ap-southeast-2",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "fips-ap-southeast-3",
}: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ Deprecated: boxedTrue,
},
- },
- },
- "elastictranscoder": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "email": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "fips-ap-southeast-4",
}: endpoint{
- Hostname: "email-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ap-southeast-4",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "email-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-ca-west-1",
}: endpoint{
- Hostname: "email-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "fips-eu-central-1",
}: endpoint{
- Hostname: "email-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ Deprecated: boxedTrue,
},
- },
- },
- "emr-containers": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
+ Region: "fips-eu-north-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
+ Region: "fips-eu-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
+ Region: "fips-eu-west-1",
}: endpoint{
- Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ Region: "fips-eu-west-2",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "fips-eu-west-3",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "fips-il-central-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
+ Region: "fips-me-south-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "fips-ca-central-1",
+ Region: "fips-sa-east-1",
}: endpoint{
- Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "sa-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -9414,7 +12408,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -9423,7 +12417,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -9432,87 +12426,74 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "il-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com",
},
endpointKey{
- Region: "us-east-2",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "me-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-1",
+ Region: "me-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "me-south-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-2",
+ Region: "sa-east-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "sa-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
},
- },
- },
- "emr-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "us-east-1",
+ Region: "us-west-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -9521,25 +12502,16 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
},
},
},
- "entitlement.marketplace": service{
+ "elasticloadbalancing": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
+ Protocols: []string{"https"},
},
},
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "es": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -9559,6 +12531,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -9568,18 +12543,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -9590,14 +12577,44 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9614,16 +12631,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "es-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -9632,16 +12640,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "es-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "es-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -9650,16 +12649,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -9668,20 +12658,17 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "es-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "es-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
},
},
},
- "events": service{
+ "elasticmapreduce": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -9701,6 +12688,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -9710,11 +12700,34 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
+ }: endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ endpointKey{
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
@@ -9722,6 +12735,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -9731,10 +12747,28 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "events-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -9743,7 +12777,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "events-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -9752,7 +12786,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "events-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -9761,12 +12795,15 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "events-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9778,21 +12815,30 @@ var awsPartition = partition{
}: endpoint{},
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "events-fips.us-east-1.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "events-fips.us-east-2.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -9801,7 +12847,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "events-fips.us-west-1.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -9810,82 +12856,23 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "events-fips.us-west-2.amazonaws.com",
+ Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
},
},
},
- "evidently": service{
+ "elastictranscoder": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "evidently.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "evidently.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "evidently.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "evidently.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "evidently.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "evidently.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "evidently.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "evidently.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "evidently.us-west-2.amazonaws.com",
- },
- },
- },
- "finspace": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-southeast-1",
}: endpoint{},
- },
- },
- "finspace-api": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
@@ -9894,21 +12881,18 @@ var awsPartition = partition{
Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "firehose": service{
+ "email": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -9933,6 +12917,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -9951,10 +12941,19 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "email-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "firehose-fips.us-east-1.amazonaws.com",
+ Hostname: "email-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -9963,7 +12962,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "firehose-fips.us-east-2.amazonaws.com",
+ Hostname: "email-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -9972,7 +12971,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "firehose-fips.us-west-1.amazonaws.com",
+ Hostname: "email-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -9981,12 +12980,15 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "firehose-fips.us-west-2.amazonaws.com",
+ Hostname: "email-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -10000,7 +13002,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "firehose-fips.us-east-1.amazonaws.com",
+ Hostname: "email-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -10009,7 +13011,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "firehose-fips.us-east-2.amazonaws.com",
+ Hostname: "email-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -10018,7 +13020,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "firehose-fips.us-west-1.amazonaws.com",
+ Hostname: "email-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -10027,83 +13029,39 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "firehose-fips.us-west-2.amazonaws.com",
+ Hostname: "email-fips.us-west-2.amazonaws.com",
},
},
},
- "fms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
+ "emr-containers": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.af-south-1.amazonaws.com",
- },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-east-1.amazonaws.com",
- },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
- },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
- },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-south-1.amazonaws.com",
- },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
- },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
- },
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -10111,17 +13069,14 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-central-1.amazonaws.com",
- },
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -10129,177 +13084,30 @@ var awsPartition = partition{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-south-1.amazonaws.com",
- },
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-1.amazonaws.com",
- },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-2.amazonaws.com",
- },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "fips-af-south-1",
- }: endpoint{
- Hostname: "fms-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-east-1",
- }: endpoint{
- Hostname: "fms-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "fms-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "fms-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "fms-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "fms-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "fms-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "fms-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "fms-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "fms-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "fms-fips.us-east-1.amazonaws.com",
+ Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -10308,7 +13116,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "fms-fips.us-east-2.amazonaws.com",
+ Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -10317,7 +13125,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "fms-fips.us-west-1.amazonaws.com",
+ Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -10326,30 +13134,21 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "fms-fips.us-west-2.amazonaws.com",
+ Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-south-1",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.me-south-1.amazonaws.com",
- },
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.sa-east-1.amazonaws.com",
- },
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -10357,7 +13156,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fms-fips.us-east-1.amazonaws.com",
+ Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -10366,7 +13165,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fms-fips.us-east-2.amazonaws.com",
+ Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -10375,7 +13174,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fms-fips.us-west-1.amazonaws.com",
+ Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -10384,116 +13183,82 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fms-fips.us-west-2.amazonaws.com",
+ Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
},
},
},
- "forecast": service{
+ "emr-serverless": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "ap-east-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "forecast-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "forecast-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "forecast-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecast-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
+ Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecast-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "forecast-fips.us-west-2.amazonaws.com",
+ Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com",
},
- },
- },
- "forecastquery": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -10502,21 +13267,39 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "emr-serverless-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -10524,7 +13307,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -10533,7 +13316,16 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -10542,310 +13334,359 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
+ Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
},
},
},
- "frauddetector": service{
+ "entitlement.marketplace": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ },
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
},
},
- "fsx": service{
+ "es": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.af-south-1.api.aws",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-east-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-northeast-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-3",
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-northeast-3.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ Hostname: "aos.ap-southeast-3.api.aws",
},
endpointKey{
- Region: "eu-central-1",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ap-southeast-4.api.aws",
+ },
endpointKey{
- Region: "eu-south-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ca-central-1.api.aws",
+ },
endpointKey{
- Region: "eu-west-2",
+ Region: "ca-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-central-1.api.aws",
},
endpointKey{
- Region: "fips-prod-ca-central-1",
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-central-2.api.aws",
},
endpointKey{
- Region: "fips-prod-us-east-1",
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-north-1.api.aws",
},
endpointKey{
- Region: "fips-prod-us-east-2",
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-south-1.api.aws",
},
endpointKey{
- Region: "fips-prod-us-west-1",
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-south-2.api.aws",
},
endpointKey{
- Region: "fips-prod-us-west-2",
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-west-1.api.aws",
},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-west-2.api.aws",
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.eu-west-3.api.aws",
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "fips",
}: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
+ Hostname: "es-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.il-central-1.api.aws",
},
endpointKey{
- Region: "me-south-1",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "prod-ca-central-1",
+ Region: "me-central-1",
+ Variant: dualStackVariant,
}: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.me-central-1.api.aws",
},
endpointKey{
- Region: "prod-ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
- Region: "prod-us-east-1",
+ Region: "me-south-1",
+ Variant: dualStackVariant,
}: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.me-south-1.api.aws",
},
endpointKey{
- Region: "prod-us-east-1",
- Variant: fipsVariant,
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.sa-east-1.api.aws",
},
endpointKey{
- Region: "prod-us-east-2",
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
}: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.us-east-1.api.aws",
},
endpointKey{
- Region: "prod-us-east-2",
+ Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "es-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "prod-us-west-1",
+ Region: "us-east-1-fips",
}: endpoint{
+ Hostname: "es-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "prod-us-west-1",
- Variant: fipsVariant,
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "aos.us-east-2.api.aws",
},
endpointKey{
- Region: "prod-us-west-2",
+ Region: "us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "es-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "prod-us-west-2",
- Variant: fipsVariant,
+ Region: "us-east-2-fips",
}: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ Hostname: "es-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "us-west-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "us-west-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
+ Hostname: "aos.us-west-1.api.aws",
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
+ Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
+ Hostname: "es-fips.us-west-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
+ Region: "us-west-1-fips",
}: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ Hostname: "es-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "es-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
},
},
- "gamelift": service{
+ "events": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -10865,24 +13706,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -10892,6 +13751,48 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "events-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "events-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "events-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "events-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -10901,25 +13802,91 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "events-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "events-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "events-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "events-fips.us-west-2.amazonaws.com",
+ },
},
},
- "gamesparks": service{
+ "evidently": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "evidently.ap-northeast-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "evidently.ap-southeast-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "evidently.ap-southeast-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "evidently.eu-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "evidently.eu-north-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "evidently.eu-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "evidently.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "evidently.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "evidently.us-west-2.amazonaws.com",
+ },
},
},
- "geo": service{
+ "finspace": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
@@ -10931,14 +13898,17 @@ var awsPartition = partition{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -10950,12 +13920,26 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "glacier": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
+ "finspace-api": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
},
+ },
+ "firehose": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -10975,6 +13959,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -10984,24 +13971,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.ca-central-1.amazonaws.com",
- },
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11011,19 +14004,10 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "glacier-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "glacier-fips.us-east-1.amazonaws.com",
+ Hostname: "firehose-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -11032,7 +14016,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "glacier-fips.us-east-2.amazonaws.com",
+ Hostname: "firehose-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -11041,7 +14025,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "glacier-fips.us-west-1.amazonaws.com",
+ Hostname: "firehose-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -11050,12 +14034,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "glacier-fips.us-west-2.amazonaws.com",
+ Hostname: "firehose-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -11069,7 +14059,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "glacier-fips.us-east-1.amazonaws.com",
+ Hostname: "firehose-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -11078,7 +14068,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "glacier-fips.us-east-2.amazonaws.com",
+ Hostname: "firehose-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -11087,7 +14077,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "glacier-fips.us-west-1.amazonaws.com",
+ Hostname: "firehose-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -11096,231 +14086,407 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "glacier-fips.us-west-2.amazonaws.com",
+ Hostname: "firehose-fips.us-west-2.amazonaws.com",
},
},
},
- "glue": service{
+ "fms": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.af-south-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
+ Region: "ap-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-east-1.amazonaws.com",
+ },
endpointKey{
- Region: "ap-northeast-2",
+ Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.eu-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.eu-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.eu-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.eu-west-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "eu-west-3",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "glue-fips.us-east-1.amazonaws.com",
+ Hostname: "fms-fips.eu-west-3.amazonaws.com",
+ },
+ endpointKey{
+ Region: "fips-af-south-1",
+ }: endpoint{
+ Hostname: "fms-fips.af-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "af-south-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "fips-ap-east-1",
}: endpoint{
- Hostname: "glue-fips.us-east-2.amazonaws.com",
+ Hostname: "fms-fips.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "ap-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "fips-ap-northeast-1",
}: endpoint{
- Hostname: "glue-fips.us-west-1.amazonaws.com",
+ Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "ap-northeast-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "fips-ap-northeast-2",
}: endpoint{
- Hostname: "glue-fips.us-west-2.amazonaws.com",
+ Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ap-northeast-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-ap-south-1",
}: endpoint{
- Hostname: "glue-fips.us-east-1.amazonaws.com",
+ Hostname: "fms-fips.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "fips-ap-southeast-1",
}: endpoint{
- Hostname: "glue-fips.us-east-2.amazonaws.com",
+ Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
+ Region: "fips-ap-southeast-2",
}: endpoint{
- Hostname: "glue-fips.us-west-1.amazonaws.com",
+ Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "fms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "fips-ca-west-1",
}: endpoint{
- Hostname: "glue-fips.us-west-2.amazonaws.com",
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
},
- },
- },
- "grafana": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "fips-eu-central-1",
}: endpoint{
- Hostname: "grafana.ap-northeast-1.amazonaws.com",
+ Hostname: "fms-fips.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-1",
+ Region: "eu-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "fips-eu-south-1",
}: endpoint{
- Hostname: "grafana.ap-northeast-2.amazonaws.com",
+ Hostname: "fms-fips.eu-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-2",
+ Region: "eu-south-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "fips-eu-west-1",
}: endpoint{
- Hostname: "grafana.ap-southeast-1.amazonaws.com",
+ Hostname: "fms-fips.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-southeast-1",
+ Region: "eu-west-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "fips-eu-west-2",
}: endpoint{
- Hostname: "grafana.ap-southeast-2.amazonaws.com",
+ Hostname: "fms-fips.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-southeast-2",
+ Region: "eu-west-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-central-1",
+ Region: "fips-eu-west-3",
}: endpoint{
- Hostname: "grafana.eu-central-1.amazonaws.com",
+ Hostname: "fms-fips.eu-west-3.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-central-1",
+ Region: "eu-west-3",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-west-1",
+ Region: "fips-me-south-1",
}: endpoint{
- Hostname: "grafana.eu-west-1.amazonaws.com",
+ Hostname: "fms-fips.me-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-1",
+ Region: "me-south-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-west-2",
+ Region: "fips-sa-east-1",
}: endpoint{
- Hostname: "grafana.eu-west-2.amazonaws.com",
+ Hostname: "fms-fips.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-2",
+ Region: "sa-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "grafana.us-east-1.amazonaws.com",
+ Hostname: "fms-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "grafana.us-east-2.amazonaws.com",
+ Hostname: "fms-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
+ Region: "fips-us-west-1",
}: endpoint{
- Hostname: "grafana.us-west-2.amazonaws.com",
+ Hostname: "fms-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "fms-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
+ Deprecated: boxedTrue,
},
- },
- },
- "greengrass": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.me-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.sa-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.us-west-2.amazonaws.com",
},
},
+ },
+ "forecast": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
@@ -11337,9 +14503,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -11347,27 +14510,72 @@ var awsPartition = partition{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "forecast-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "forecast-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "forecast-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "forecast-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "forecast-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "forecast-fips.us-west-2.amazonaws.com",
+ },
},
},
- "groundstation": service{
+ "forecastquery": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "af-south-1",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -11377,16 +14585,13 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "groundstation-fips.us-east-1.amazonaws.com",
+ Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -11395,7 +14600,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "groundstation-fips.us-east-2.amazonaws.com",
+ Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -11404,18 +14609,12 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "groundstation-fips.us-west-2.amazonaws.com",
+ Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -11423,7 +14622,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "groundstation-fips.us-east-1.amazonaws.com",
+ Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -11432,7 +14631,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "groundstation-fips.us-east-2.amazonaws.com",
+ Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -11441,17 +14640,33 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "groundstation-fips.us-west-2.amazonaws.com",
+ Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
},
},
},
- "guardduty": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
+ "frauddetector": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
},
+ },
+ "fsx": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -11471,6 +14686,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -11480,384 +14698,279 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-south-1",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1-fips",
+ Region: "fips-ca-west-1",
}: endpoint{
- Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "fips-prod-ca-central-1",
}: endpoint{
- Hostname: "guardduty-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2-fips",
+ Region: "fips-prod-ca-west-1",
}: endpoint{
- Hostname: "guardduty-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
+ Region: "fips-prod-us-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
+ Region: "fips-prod-us-east-2",
}: endpoint{
- Hostname: "guardduty-fips.us-west-1.amazonaws.com",
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1-fips",
+ Region: "fips-prod-us-west-1",
}: endpoint{
- Hostname: "guardduty-fips.us-west-1.amazonaws.com",
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "fips-prod-us-west-2",
}: endpoint{
- Hostname: "guardduty-fips.us-west-2.amazonaws.com",
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2-fips",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "guardduty-fips.us-west-2.amazonaws.com",
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "us-east-1",
},
Deprecated: boxedTrue,
},
- },
- },
- "health": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "health.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "global.health.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "health-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
+ Region: "fips-us-west-1",
}: endpoint{
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "fips-us-west-2",
}: endpoint{
- Hostname: "health-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- },
- },
- "healthlake": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "me-central-1",
}: endpoint{},
- },
- },
- "honeycode": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-west-2",
+ Region: "me-south-1",
}: endpoint{},
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "aws-global",
+ Region: "prod-ca-central-1",
}: endpoint{
- Hostname: "iam.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "aws-global",
+ Region: "prod-ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iam-fips.amazonaws.com",
+ Hostname: "fsx-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "aws-global-fips",
+ Region: "prod-ca-west-1",
}: endpoint{
- Hostname: "iam-fips.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "iam",
+ Region: "prod-ca-west-1",
+ Variant: fipsVariant,
}: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "iam",
- Variant: fipsVariant,
+ Region: "prod-us-east-1",
}: endpoint{
- Hostname: "iam-fips.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "iam-fips",
+ Region: "prod-us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "iam-fips.amazonaws.com",
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
- },
- },
- "identity-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
- },
endpointKey{
- Region: "us-east-1-fips",
+ Region: "prod-us-east-2",
}: endpoint{
- Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
- },
- },
- "identitystore": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "importexport": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "aws-global",
+ Region: "prod-us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "importexport.amazonaws.com",
- SignatureVersions: []string{"v2", "v4"},
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
- Service: "IngestionService",
+ Region: "us-east-2",
},
+ Deprecated: boxedTrue,
},
- },
- },
- "inspector": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
+ Region: "prod-us-west-1",
}: endpoint{
- Hostname: "inspector-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "prod-us-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "prod-us-west-2",
}: endpoint{
- Hostname: "inspector-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "prod-us-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-west-2.amazonaws.com",
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -11865,7 +14978,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-east-1.amazonaws.com",
+ Hostname: "fsx-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -11874,7 +14987,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-east-2.amazonaws.com",
+ Hostname: "fsx-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -11883,7 +14996,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-west-1.amazonaws.com",
+ Hostname: "fsx-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -11892,12 +15005,15 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-west-2.amazonaws.com",
+ Hostname: "fsx-fips.us-west-2.amazonaws.com",
},
},
},
- "inspector2": service{
+ "gamelift": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -11907,6 +15023,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -11957,15 +15076,59 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "iot": service{
+ "geo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "glacier": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "execute-api",
- },
+ Protocols: []string{"http", "https"},
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -11975,6 +15138,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -11984,6 +15150,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -11991,7 +15160,7 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iot-fips.ca-central-1.amazonaws.com",
+ Hostname: "glacier-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
@@ -11999,6 +15168,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -12011,45 +15183,45 @@ var awsPartition = partition{
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "iot-fips.ca-central-1.amazonaws.com",
+ Hostname: "glacier-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Service: "execute-api",
+ Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "iot-fips.us-east-1.amazonaws.com",
+ Hostname: "glacier-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Service: "execute-api",
+ Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "iot-fips.us-east-2.amazonaws.com",
+ Hostname: "glacier-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Service: "execute-api",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "iot-fips.us-west-1.amazonaws.com",
+ Hostname: "glacier-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Service: "execute-api",
+ Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "iot-fips.us-west-2.amazonaws.com",
+ Hostname: "glacier-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Service: "execute-api",
+ Region: "us-west-2",
},
Deprecated: boxedTrue,
},
@@ -12066,7 +15238,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iot-fips.us-east-1.amazonaws.com",
+ Hostname: "glacier-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -12075,7 +15247,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iot-fips.us-east-2.amazonaws.com",
+ Hostname: "glacier-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -12084,7 +15256,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iot-fips.us-west-1.amazonaws.com",
+ Hostname: "glacier-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -12093,66 +15265,77 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iot-fips.us-west-2.amazonaws.com",
+ Hostname: "glacier-fips.us-west-2.amazonaws.com",
},
},
},
- "iotanalytics": service{
+ "globalaccelerator": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "glue": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "ap-south-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-southeast-3",
}: endpoint{},
- },
- },
- "iotevents": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "ca-west-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
- },
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
@@ -12161,41 +15344,56 @@ var awsPartition = partition{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
+ Hostname: "glue-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "iotevents-fips.us-east-1.amazonaws.com",
+ Hostname: "glue-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "fips-us-west-1",
}: endpoint{
- Hostname: "iotevents-fips.us-east-2.amazonaws.com",
+ Hostname: "glue-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "iotevents-fips.us-west-2.amazonaws.com",
+ Hostname: "glue-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -12203,7 +15401,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotevents-fips.us-east-1.amazonaws.com",
+ Hostname: "glue-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -12212,7 +15410,16 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotevents-fips.us-east-2.amazonaws.com",
+ Hostname: "glue-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "glue-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -12221,16 +15428,16 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotevents-fips.us-west-2.amazonaws.com",
+ Hostname: "glue-fips.us-west-2.amazonaws.com",
},
},
},
- "ioteventsdata": service{
+ "grafana": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
- Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
+ Hostname: "grafana.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-1",
},
@@ -12238,23 +15445,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{
- Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
+ Hostname: "grafana.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-northeast-2",
},
},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "data.iotevents.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
- Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
+ Hostname: "grafana.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-1",
},
@@ -12262,32 +15461,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{
- Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
+ Hostname: "grafana.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-2",
},
},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "data.iotevents.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
endpointKey{
Region: "eu-central-1",
}: endpoint{
- Hostname: "data.iotevents.eu-central-1.amazonaws.com",
+ Hostname: "grafana.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-central-1",
},
@@ -12295,7 +15477,7 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{
- Hostname: "data.iotevents.eu-west-1.amazonaws.com",
+ Hostname: "grafana.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-1",
},
@@ -12303,113 +15485,45 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{
- Hostname: "data.iotevents.eu-west-2.amazonaws.com",
+ Hostname: "grafana.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-2",
},
},
endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
+ Region: "us-east-1",
}: endpoint{
- Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
+ Hostname: "grafana.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "us-east-2",
}: endpoint{
- Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
+ Hostname: "grafana.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "us-west-2",
}: endpoint{
- Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
+ Hostname: "grafana.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "data.iotevents.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "data.iotevents.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "data.iotevents.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "iotsecuredtunneling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
+ },
+ Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -12432,27 +15546,21 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
+ Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
+ Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
@@ -12461,7 +15569,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
+ Hostname: "greengrass-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -12470,36 +15578,21 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
+ Hostname: "greengrass-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
+ Hostname: "greengrass-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -12507,7 +15600,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
+ Hostname: "greengrass-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -12516,16 +15609,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
+ Hostname: "greengrass-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -12534,21 +15618,18 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
+ Hostname: "greengrass-fips.us-west-2.amazonaws.com",
},
},
},
- "iotsitewise": service{
+ "groundstation": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -12556,33 +15637,18 @@ var awsPartition = partition{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
+ Hostname: "groundstation-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -12591,7 +15657,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
+ Hostname: "groundstation-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -12600,12 +15666,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
+ Hostname: "groundstation-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -12613,7 +15685,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
+ Hostname: "groundstation-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -12622,7 +15694,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
+ Hostname: "groundstation-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -12631,144 +15703,320 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
+ Hostname: "groundstation-fips.us-west-2.amazonaws.com",
},
},
},
- "iotthingsgraph": service{
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
Defaults: endpointDefaults{
defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "iotthingsgraph",
- },
+ Protocols: []string{"https"},
},
},
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "ap-east-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
}: endpoint{},
- },
- },
- "iottwinmaker": service{
- Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
}: endpoint{},
- },
- },
- "iotwireless": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com",
+ Hostname: "guardduty-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-1",
+ Region: "us-east-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com",
+ Hostname: "guardduty-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-southeast-2",
+ Region: "us-west-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-west-1",
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
+ Hostname: "guardduty-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "guardduty-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-1",
+ Region: "us-west-2",
},
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "health": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "health.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
},
+ },
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-east-1",
+ Region: "aws-global",
}: endpoint{
- Hostname: "api.iotwireless.us-east-1.amazonaws.com",
+ Hostname: "global.health.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
- Region: "us-west-2",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "api.iotwireless.us-west-2.amazonaws.com",
+ Hostname: "health-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "health-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
},
+ Deprecated: boxedTrue,
},
},
},
- "ivs": service{
+ "healthlake": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "us-west-2",
}: endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
+ Region: "aws-global",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
+ Region: "aws-global-fips",
+ }: endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "iam",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "iam",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "iam-fips",
+ }: endpoint{
+ Hostname: "iam-fips.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
- "ivschat": service{
+ "identity-chime": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "eu-west-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
- "kafka": service{
+ "identitystore": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -12789,86 +16037,58 @@ var awsPartition = partition{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
+ Region: "ap-south-2",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ca-west-1",
}: endpoint{},
- },
- },
- "kafkaconnect": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
@@ -12887,107 +16107,141 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "kendra": service{
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-southeast-1",
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "ingest.timestream": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "ingest-fips-us-east-1",
}: endpoint{
- Hostname: "kendra-fips.us-east-1.amazonaws.com",
+ Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "ingest-fips-us-east-2",
}: endpoint{
- Hostname: "kendra-fips.us-east-2.amazonaws.com",
+ Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "ingest-fips-us-west-2",
}: endpoint{
- Hostname: "kendra-fips.us-west-2.amazonaws.com",
+ Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "ingest-us-east-1",
}: endpoint{
- Hostname: "kendra-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
+ Region: "ingest-us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kendra-fips.us-east-2.amazonaws.com",
+ Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
+ Region: "ingest-us-east-2",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-west-2",
+ Region: "ingest-us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kendra-fips.us-west-2.amazonaws.com",
+ Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
},
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
+ Region: "ingest-us-west-2",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
+ Region: "ingest-us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "ap-northeast-2",
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-3",
+ Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
+ Region: "us-west-2",
}: endpoint{},
+ },
+ },
+ "inspector": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-southeast-1",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-3",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
@@ -12995,22 +16249,16 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "kinesis-fips.us-east-1.amazonaws.com",
+ Hostname: "inspector-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -13019,7 +16267,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "kinesis-fips.us-east-2.amazonaws.com",
+ Hostname: "inspector-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -13028,7 +16276,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "kinesis-fips.us-west-1.amazonaws.com",
+ Hostname: "inspector-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -13037,21 +16285,12 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "kinesis-fips.us-west-2.amazonaws.com",
+ Hostname: "inspector-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -13059,7 +16298,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kinesis-fips.us-east-1.amazonaws.com",
+ Hostname: "inspector-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -13068,7 +16307,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kinesis-fips.us-east-2.amazonaws.com",
+ Hostname: "inspector-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -13077,7 +16316,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kinesis-fips.us-west-1.amazonaws.com",
+ Hostname: "inspector-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -13086,11 +16325,11 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kinesis-fips.us-west-2.amazonaws.com",
+ Hostname: "inspector-fips.us-west-2.amazonaws.com",
},
},
},
- "kinesisanalytics": service{
+ "inspector2": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -13126,68 +16365,13 @@ var awsPartition = partition{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kinesisvideo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
@@ -13199,442 +16383,277 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "kms-fips.af-south-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "af-south-1-fips",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "kms-fips.af-south-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "af-south-1",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
+ Region: "fips-us-west-1",
}: endpoint{
- Hostname: "kms-fips.ap-east-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-east-1-fips",
+ Region: "fips-us-west-2",
}: endpoint{
- Hostname: "kms-fips.ap-east-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-east-1",
+ Region: "us-west-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-northeast-1",
+ Region: "me-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
- },
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
- Region: "ap-northeast-1-fips",
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "inspector2-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
+ Hostname: "inspector2-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "ap-northeast-2-fips",
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "inspector2-fips.us-west-1.amazonaws.com",
},
endpointKey{
- Region: "ap-northeast-3",
+ Region: "us-west-2",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-3",
+ Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
+ Hostname: "inspector2-fips.us-west-2.amazonaws.com",
},
- endpointKey{
- Region: "ap-northeast-3-fips",
+ },
+ },
+ "internetmonitor": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
},
+ },
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "internetmonitor.af-south-1.api.aws",
+ },
endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
+ Region: "ap-east-1",
}: endpoint{
- Hostname: "kms-fips.ap-south-1.amazonaws.com",
+ Hostname: "internetmonitor.ap-east-1.api.aws",
},
endpointKey{
- Region: "ap-south-1-fips",
+ Region: "ap-northeast-1",
}: endpoint{
- Hostname: "kms-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.ap-northeast-1.api.aws",
},
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-northeast-2.api.aws",
+ },
endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
+ Region: "ap-northeast-3",
}: endpoint{
- Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
+ Hostname: "internetmonitor.ap-northeast-3.api.aws",
},
endpointKey{
- Region: "ap-southeast-1-fips",
+ Region: "ap-south-1",
}: endpoint{
- Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.ap-south-1.api.aws",
},
endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "internetmonitor.ap-south-2.api.aws",
+ },
endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
+ Region: "ap-southeast-1",
}: endpoint{
- Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
+ Hostname: "internetmonitor.ap-southeast-1.api.aws",
},
endpointKey{
- Region: "ap-southeast-2-fips",
+ Region: "ap-southeast-2",
}: endpoint{
- Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
+ Hostname: "internetmonitor.ap-southeast-3.api.aws",
},
endpointKey{
- Region: "ap-southeast-3-fips",
+ Region: "ap-southeast-4",
}: endpoint{
- Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "internetmonitor.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.ca-central-1.amazonaws.com",
+ Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
},
endpointKey{
- Region: "ca-central-1-fips",
+ Region: "ca-west-1",
}: endpoint{
- Hostname: "kms-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.ca-west-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.eu-central-1.amazonaws.com",
+ Hostname: "internetmonitor.eu-central-1.api.aws",
},
endpointKey{
- Region: "eu-central-1-fips",
+ Region: "eu-central-2",
}: endpoint{
- Hostname: "kms-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.eu-south-1.amazonaws.com",
+ Hostname: "internetmonitor.eu-south-1.api.aws",
},
endpointKey{
- Region: "eu-south-1-fips",
+ Region: "eu-south-2",
}: endpoint{
- Hostname: "kms-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1-fips",
}: endpoint{
- Hostname: "kms-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-2-fips",
}: endpoint{
- Hostname: "kms-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.eu-west-3.amazonaws.com",
+ Hostname: "internetmonitor.eu-west-3.api.aws",
},
endpointKey{
- Region: "eu-west-3-fips",
+ Region: "il-central-1",
}: endpoint{
- Hostname: "kms-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.il-central-1.api.aws",
},
endpointKey{
Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.me-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-central-1-fips",
- }: endpoint{
- Hostname: "kms-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.me-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-south-1-fips",
}: endpoint{
- Hostname: "kms-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.sa-east-1.amazonaws.com",
+ Hostname: "internetmonitor.sa-east-1.api.aws",
},
endpointKey{
- Region: "sa-east-1-fips",
+ Region: "us-east-1",
}: endpoint{
- Hostname: "kms-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.us-east-1.api.aws",
},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.us-east-1.amazonaws.com",
+ Hostname: "internetmonitor-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "us-east-1-fips",
+ Region: "us-east-2",
}: endpoint{
- Hostname: "kms-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.us-east-2.api.aws",
},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.us-east-2.amazonaws.com",
+ Hostname: "internetmonitor-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "us-east-2-fips",
+ Region: "us-west-1",
}: endpoint{
- Hostname: "kms-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.us-west-1.api.aws",
},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.us-west-1.amazonaws.com",
+ Hostname: "internetmonitor-fips.us-west-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-1-fips",
+ Region: "us-west-2",
}: endpoint{
- Hostname: "kms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor.us-west-2.api.aws",
},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "kms-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "kms-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "internetmonitor-fips.us-west-2.amazonaws.com",
},
},
},
- "lakeformation": service{
+ "iot": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -13644,9 +16663,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -13659,15 +16675,18 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iot-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -13677,42 +16696,44 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "iot-fips.ca-central-1.amazonaws.com",
+
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
+ Hostname: "iot-fips.us-east-1.amazonaws.com",
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
+ Hostname: "iot-fips.us-east-2.amazonaws.com",
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
+ Hostname: "iot-fips.us-west-1.amazonaws.com",
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
+ Hostname: "iot-fips.us-west-2.amazonaws.com",
+
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -13726,7 +16747,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
+ Hostname: "iot-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -13735,7 +16756,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
+ Hostname: "iot-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -13744,7 +16765,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
+ Hostname: "iot-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -13753,280 +16774,330 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
+ Hostname: "iot-fips.us-west-2.amazonaws.com",
},
},
},
- "lambda": service{
+ "iotanalytics": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "af-south-1",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.af-south-1.api.aws",
- },
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
- Region: "ap-east-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-east-1.api.aws",
- },
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
- Region: "ap-northeast-1",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-1.api.aws",
- },
+ Region: "us-east-1",
+ }: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-2.api.aws",
- },
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "iotevents": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-3",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-3.api.aws",
- },
+ Region: "ap-northeast-2",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-south-1.api.aws",
- },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-1.api.aws",
- },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda.ap-southeast-2.api.aws",
+ Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
},
endpointKey{
- Region: "ap-southeast-3",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-3.api.aws",
- },
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
+ Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "lambda.ca-central-1.api.aws",
+ Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "lambda.eu-central-1.api.aws",
+ Hostname: "iotevents-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "iotevents-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
+ Region: "fips-us-west-2",
}: endpoint{
- Hostname: "lambda.eu-north-1.api.aws",
+ Hostname: "iotevents-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-south-1",
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda.eu-south-1.api.aws",
+ Hostname: "iotevents-fips.us-east-1.amazonaws.com",
},
endpointKey{
- Region: "eu-west-1",
+ Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
+ Region: "us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda.eu-west-1.api.aws",
+ Hostname: "iotevents-fips.us-east-2.amazonaws.com",
},
endpointKey{
- Region: "eu-west-2",
+ Region: "us-west-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
+ Region: "us-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda.eu-west-2.api.aws",
+ Hostname: "iotevents-fips.us-west-2.amazonaws.com",
},
+ },
+ },
+ "ioteventsdata": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
+ Region: "ap-northeast-1",
}: endpoint{
- Hostname: "lambda.eu-west-3.api.aws",
+ Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
},
endpointKey{
- Region: "fips-us-east-1",
+ Region: "ap-northeast-2",
}: endpoint{
- Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ap-northeast-2",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "ap-south-1",
}: endpoint{
- Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ Hostname: "data.iotevents.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "ap-south-1",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "ap-southeast-1",
}: endpoint{
- Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "ap-southeast-1",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "ap-southeast-2",
}: endpoint{
- Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ap-southeast-2",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-central-1",
- }: endpoint{},
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "data.iotevents.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
endpointKey{
- Region: "me-south-1",
- }: endpoint{},
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
+ Region: "eu-central-1",
}: endpoint{
- Hostname: "lambda.me-south-1.api.aws",
+ Hostname: "data.iotevents.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
},
endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "data.iotevents.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
+ Region: "eu-west-2",
}: endpoint{
- Hostname: "lambda.sa-east-1.api.aws",
+ Hostname: "data.iotevents.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
},
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "lambda.us-east-1.api.aws",
+ Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
+ Region: "us-east-1",
}: endpoint{
- Hostname: "lambda.us-east-2.api.aws",
+ Hostname: "data.iotevents.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
},
endpointKey{
- Region: "us-east-2",
+ Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
+ Region: "us-east-2",
}: endpoint{
- Hostname: "lambda.us-west-1.api.aws",
+ Hostname: "data.iotevents.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
},
endpointKey{
- Region: "us-west-1",
+ Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
},
endpointKey{
Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
}: endpoint{
- Hostname: "lambda.us-west-2.api.aws",
+ Hostname: "data.iotevents.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
},
},
},
- "license-manager": service{
+ "iotfleetwise": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "af-south-1",
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
}: endpoint{},
+ },
+ },
+ "iotsecuredtunneling": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
+ },
+ },
+ Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -14036,9 +17107,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -14048,21 +17116,21 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -14072,10 +17140,19 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -14084,7 +17161,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -14093,7 +17170,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -14102,7 +17179,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -14121,7 +17198,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -14130,7 +17207,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -14139,7 +17216,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -14148,27 +17225,18 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
},
},
},
- "license-manager-user-subscriptions": service{
+ "iotsitewise": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -14182,65 +17250,53 @@ var awsPartition = partition{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
- Region: "eu-south-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
+ Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -14248,7 +17304,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -14257,16 +17313,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -14275,11 +17322,18 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
},
},
},
- "lightsail": service{
+ "iotthingsgraph": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "iotthingsgraph",
+ },
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
@@ -14287,61 +17341,28 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "logs": service{
+ "iottwinmaker": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -14352,100 +17373,213 @@ var awsPartition = partition{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
+ Region: "api-ap-northeast-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ Region: "api-ap-northeast-2",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "api-ap-south-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
+ Region: "api-ap-southeast-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "api-ap-southeast-2",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
+ Region: "api-eu-central-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
+ Region: "api-eu-west-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
endpointKey{
- Region: "fips-us-east-1",
+ Region: "api-us-east-1",
}: endpoint{
- Hostname: "logs-fips.us-east-1.amazonaws.com",
+ Hostname: "api.iottwinmaker.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "api-us-west-2",
}: endpoint{
- Hostname: "logs-fips.us-east-2.amazonaws.com",
+ Hostname: "api.iottwinmaker.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-west-2",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "data-ap-northeast-1",
}: endpoint{
- Hostname: "logs-fips.us-west-1.amazonaws.com",
+ Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "ap-northeast-1",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "data-ap-northeast-2",
}: endpoint{
- Hostname: "logs-fips.us-west-2.amazonaws.com",
+ Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "data-ap-south-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "data-ap-southeast-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "data-ap-southeast-2",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "data-eu-central-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "data-eu-west-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "data-us-east-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "data-us-west-2",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
- Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-central-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
+ Region: "fips-api-us-east-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
+ Region: "fips-api-us-west-2",
+ }: endpoint{
+ Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-data-us-east-1",
}: endpoint{
- Hostname: "logs-fips.us-east-1.amazonaws.com",
+ Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
+ Region: "fips-data-us-west-2",
+ }: endpoint{
+ Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "logs-fips.us-east-2.amazonaws.com",
+ Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "logs-fips.us-west-1.amazonaws.com",
+ Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -14454,55 +17588,80 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "logs-fips.us-west-2.amazonaws.com",
+ Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
},
},
},
- "lookoutequipment": service{
+ "iotwireless": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "api.iotwireless.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.iotwireless.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
- "lookoutmetrics": service{
+ "ivs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "lookoutvision": service{
+ "ivschat": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
@@ -14510,6 +17669,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -14519,90 +17681,37 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "m2": service{
+ "ivsrealtime": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
+ Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "eu-central-1",
}: endpoint{},
- },
- },
- "machinelearning": service{
- Endpoints: serviceEndpoints{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
- },
- },
- "macie": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "macie-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "macie-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie-fips.us-east-1.amazonaws.com",
- },
endpointKey{
Region: "us-west-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie-fips.us-west-2.amazonaws.com",
- },
},
},
- "macie2": service{
+ "kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -14622,24 +17731,54 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -14649,10 +17788,28 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "kafka-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ Hostname: "kafka-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -14661,7 +17818,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ Hostname: "kafka-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -14670,7 +17827,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ Hostname: "kafka-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -14679,12 +17836,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ Hostname: "kafka-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -14698,7 +17861,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ Hostname: "kafka-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -14707,7 +17870,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ Hostname: "kafka-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -14716,73 +17879,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "macie2-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie2-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "managedblockchain": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "marketplacecommerceanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "media-pipelines-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "kafka-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -14791,24 +17888,12 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "kafka-fips.us-west-2.amazonaws.com",
},
},
},
- "mediaconnect": service{
+ "kafkaconnect": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -14824,6 +17909,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -14856,14 +17944,11 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "mediaconvert": service{
+ "kendra": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -14880,27 +17965,18 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ Hostname: "kendra-fips.ca-central-1.amazonaws.com",
},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ Hostname: "kendra-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
@@ -14909,7 +17985,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ Hostname: "kendra-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -14918,33 +17994,21 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
+ Hostname: "kendra-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ Hostname: "kendra-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -14952,7 +18016,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ Hostname: "kendra-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -14961,16 +18025,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
+ Hostname: "kendra-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -14979,152 +18034,192 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ Hostname: "kendra-fips.us-west-2.amazonaws.com",
},
},
},
- "medialive": service{
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-east-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-2",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-northeast-3.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-2.api.aws",
+ },
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-3.api.aws",
+ },
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "kendra-ranking.ap-southeast-4.api.aws",
+ },
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kendra-ranking-fips.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.eu-west-3.api.aws",
+ },
endpointKey{
- Region: "fips-us-east-1",
+ Region: "il-central-1",
}: endpoint{
- Hostname: "medialive-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "kendra-ranking.il-central-1.api.aws",
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "me-central-1",
}: endpoint{
- Hostname: "medialive-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "kendra-ranking.me-central-1.api.aws",
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "me-south-1",
}: endpoint{
- Hostname: "medialive-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "kendra-ranking.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.sa-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "medialive-fips.us-east-1.amazonaws.com",
+ Hostname: "kendra-ranking-fips.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-2",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "medialive-fips.us-east-2.amazonaws.com",
+ Hostname: "kendra-ranking-fips.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-2",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kendra-ranking.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "medialive-fips.us-west-2.amazonaws.com",
+ Hostname: "kendra-ranking-fips.us-west-2.api.aws",
},
},
},
- "mediapackage": service{
+ "kinesis": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
+ Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-east-1",
}: endpoint{},
- },
- },
- "mediapackage-vod": service{
- Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -15132,62 +18227,46 @@ var awsPartition = partition{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
+ Region: "ap-south-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ca-west-1",
}: endpoint{},
- },
- },
- "mediastore": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-northeast-1",
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
@@ -15196,152 +18275,95 @@ var awsPartition = partition{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "meetings-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
+ Hostname: "kinesis-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1-fips",
+ Region: "fips-us-east-2",
}: endpoint{
- Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
+ Hostname: "kinesis-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "us-east-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "fips-us-west-1",
}: endpoint{
- Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
+ Hostname: "kinesis-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2-fips",
+ Region: "fips-us-west-2",
}: endpoint{
- Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
+ Hostname: "kinesis-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- },
- },
- "memory-db": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "me-south-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "sa-east-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
+ Region: "us-east-1",
}: endpoint{},
endpointKey{
- Region: "fips",
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "memory-db-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
+ Hostname: "kinesis-fips.us-east-1.amazonaws.com",
},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "messaging-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesis-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
- Region: "us-east-1",
+ Region: "us-west-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
+ Hostname: "kinesis-fips.us-west-1.amazonaws.com",
},
endpointKey{
- Region: "us-east-1-fips",
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "kinesis-fips.us-west-2.amazonaws.com",
},
},
},
- "metering.marketplace": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- },
+ "kinesisanalytics": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -15361,6 +18383,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -15370,18 +18395,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -15391,6 +18428,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -15414,32 +18454,7 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "mgh": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mgn": service{
+ "kinesisvideo": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -15453,9 +18468,6 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -15465,21 +18477,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -15489,9 +18492,6 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -15501,586 +18501,547 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "migrationhub-strategy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
- "mobileanalytics": service{
+ "kms": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "models-v2-lex": service{
- Endpoints: serviceEndpoints{
+ Region: "ProdFips",
+ }: endpoint{
+ Hostname: "kms-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
+ Region: "af-south-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.af-south-1.amazonaws.com",
+ },
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "models.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
+ Region: "af-south-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.af-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Service: "lex",
+ Region: "af-south-1",
},
+ Deprecated: boxedTrue,
},
- defaultKey{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "models-fips.lex.{region}.{dnsSuffix}",
+ Hostname: "kms-fips.ap-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ap-east-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ap-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Service: "lex",
+ Region: "ap-east-1",
},
+ Deprecated: boxedTrue,
},
- },
- Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "ap-northeast-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
+ },
endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
+ Region: "ap-northeast-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "us-east-1",
+ Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-northeast-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
},
endpointKey{
- Region: "us-east-1-fips",
+ Region: "ap-northeast-2-fips",
}: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ap-northeast-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "ap-northeast-3",
Variant: fipsVariant,
}: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
},
endpointKey{
- Region: "us-west-2-fips",
+ Region: "ap-northeast-3-fips",
}: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ap-northeast-3",
},
Deprecated: boxedTrue,
},
- },
- },
- "monitoring": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
+ Region: "ap-south-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "monitoring-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "kms-fips.ap-south-1.amazonaws.com",
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "ap-south-1-fips",
}: endpoint{
- Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ Hostname: "kms-fips.ap-south-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "ap-south-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "monitoring-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "kms-fips.ap-south-2.amazonaws.com",
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "ap-south-2-fips",
}: endpoint{
- Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.ap-south-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "ap-south-2",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "ap-southeast-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "ap-southeast-1-fips",
}: endpoint{
- Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "ap-southeast-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
+ Region: "ap-southeast-2-fips",
}: endpoint{
- Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ Deprecated: boxedTrue,
},
- },
- },
- "mq": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
+ Region: "ap-southeast-3",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
+ },
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ Region: "ap-southeast-3-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-north-1",
+ Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+ },
endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
+ Region: "ap-southeast-4-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "eu-west-2",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
- Region: "fips-us-east-1",
+ Region: "ca-central-1-fips",
}: endpoint{
- Hostname: "mq-fips.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-1",
+ Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-east-2",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "mq-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
},
endpointKey{
- Region: "fips-us-west-1",
+ Region: "ca-west-1-fips",
}: endpoint{
- Hostname: "mq-fips.us-west-1.amazonaws.com",
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-1",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "fips-us-west-2",
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "mq-fips.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.eu-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-west-2",
+ Region: "eu-central-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "eu-central-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mq-fips.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.eu-central-2.amazonaws.com",
},
endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
+ Region: "eu-central-2-fips",
}: endpoint{
- Hostname: "mq-fips.us-east-2.amazonaws.com",
+ Hostname: "kms-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-west-1",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "eu-north-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mq-fips.us-west-1.amazonaws.com",
+ Hostname: "kms-fips.eu-north-1.amazonaws.com",
},
endpointKey{
- Region: "us-west-2",
+ Region: "eu-north-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "us-west-2",
+ Region: "eu-south-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "mq-fips.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.eu-south-1.amazonaws.com",
},
- },
- },
- "mturk-requester": service{
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "sandbox",
+ Region: "eu-south-1-fips",
}: endpoint{
- Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-east-1",
+ Region: "eu-south-2",
}: endpoint{},
- },
- },
- "neptune": service{
- Endpoints: serviceEndpoints{
endpointKey{
- Region: "ap-east-1",
+ Region: "eu-south-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
+ Hostname: "kms-fips.eu-south-2.amazonaws.com",
},
endpointKey{
- Region: "ap-northeast-1",
+ Region: "eu-south-2-fips",
}: endpoint{
- Hostname: "rds.ap-northeast-1.amazonaws.com",
+ Hostname: "kms-fips.eu-south-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-northeast-1",
+ Region: "eu-south-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-northeast-2",
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
+ Hostname: "kms-fips.eu-west-1.amazonaws.com",
},
endpointKey{
- Region: "ap-south-1",
+ Region: "eu-west-1-fips",
}: endpoint{
- Hostname: "rds.ap-south-1.amazonaws.com",
+ Hostname: "kms-fips.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-south-1",
+ Region: "eu-west-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ap-southeast-1",
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
+ Hostname: "kms-fips.eu-west-2.amazonaws.com",
},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "eu-west-2-fips",
}: endpoint{
- Hostname: "rds.ap-southeast-2.amazonaws.com",
+ Hostname: "kms-fips.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ap-southeast-2",
+ Region: "eu-west-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ca-central-1",
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
+ Hostname: "kms-fips.eu-west-3.amazonaws.com",
},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-west-3-fips",
}: endpoint{
- Hostname: "rds.eu-central-1.amazonaws.com",
+ Hostname: "kms-fips.eu-west-3.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-central-1",
+ Region: "eu-west-3",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-north-1",
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
+ Hostname: "kms-fips.il-central-1.amazonaws.com",
},
endpointKey{
- Region: "eu-west-1",
+ Region: "il-central-1-fips",
}: endpoint{
- Hostname: "rds.eu-west-1.amazonaws.com",
+ Hostname: "kms-fips.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-1",
+ Region: "il-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "eu-west-2",
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
+ Hostname: "kms-fips.me-central-1.amazonaws.com",
},
endpointKey{
- Region: "eu-west-3",
+ Region: "me-central-1-fips",
}: endpoint{
- Hostname: "rds.eu-west-3.amazonaws.com",
+ Hostname: "kms-fips.me-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-3",
+ Region: "me-central-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.me-south-1.amazonaws.com",
+ Hostname: "kms-fips.me-south-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "me-south-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.me-south-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "me-south-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.sa-east-1.amazonaws.com",
+ Hostname: "kms-fips.sa-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "sa-east-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.sa-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "sa-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.us-east-1.amazonaws.com",
+ Hostname: "kms-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.us-east-2.amazonaws.com",
+ Hostname: "kms-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "kms-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.us-west-1.amazonaws.com",
+ Hostname: "kms-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "rds.us-west-2.amazonaws.com",
+ Hostname: "kms-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "kms-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
+ Deprecated: boxedTrue,
},
},
},
- "network-firewall": service{
+ "lakeformation": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -16100,30 +19061,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
- },
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -16133,19 +19106,10 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -16154,7 +19118,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -16163,7 +19127,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -16172,12 +19136,18 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -16191,7 +19161,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -16200,7 +19170,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -16209,7 +19179,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -16218,41 +19188,3626 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
},
},
},
- "networkmanager": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
+ "lambda": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "aws-global",
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "networkmanager.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
+ Hostname: "lambda.af-south-1.api.aws",
},
- },
- },
- "nimble": service{
- Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "lambda-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "lambda-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "license-manager": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "license-manager-linux-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "license-manager-user-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "lightsail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "logs-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "logs-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "logs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "logs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "logs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "logs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "lookoutequipment": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "lookoutmetrics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "lookoutvision": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "m2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ },
+ },
+ "machinelearning": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "macie2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "macie2-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "macie2-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "managedblockchain": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "managedblockchain-query": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "media-pipelines-chime": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "mediaconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediaconvert": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "medialive": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "medialive-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "medialive-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "medialive-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "mediapackage": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediapackage-vod": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediapackagev2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mediastore": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "meetings-chime": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "memory-db": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "memory-db-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "messaging-chime": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mgh": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mgn": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "migrationhub-orchestrator": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "migrationhub-strategy": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "models-v2-lex": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.{region}.{dnsSuffix}",
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "models-fips.lex.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "monitoring-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "monitoring-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "mq": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "mq-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "mq-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "mq-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "mq-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mq-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mq-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mq-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mq-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "sandbox",
+ }: endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
+ "neptune": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "rds.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "rds.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "rds.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "rds.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "rds.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "rds.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "network-firewall": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "networkmanager": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "networkmanager.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "aws-global",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-global",
+ }: endpoint{
+ Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "nimble": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "oam": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -16260,6 +22815,14 @@ var awsPartition = partition{
},
"oidc": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "oidc.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{
@@ -16300,6 +22863,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "oidc.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -16316,42 +22887,190 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
- Hostname: "oidc.ca-central-1.amazonaws.com",
+ Hostname: "oidc.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "oidc.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "oidc.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "oidc.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "oidc.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "oidc.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "oidc.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "oidc.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "oidc.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "oidc.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "oidc.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "oidc.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "oidc.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "oidc.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "oidc.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "oidc.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "oidc.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "us-west-1",
},
},
endpointKey{
- Region: "eu-central-1",
+ Region: "us-west-2",
}: endpoint{
- Hostname: "oidc.eu-central-1.amazonaws.com",
+ Hostname: "oidc.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-central-1",
+ Region: "us-west-2",
},
},
+ },
+ },
+ "omics": service{
+ Endpoints: serviceEndpoints{
endpointKey{
- Region: "eu-north-1",
+ Region: "ap-southeast-1",
}: endpoint{
- Hostname: "oidc.eu-north-1.amazonaws.com",
+ Hostname: "omics.ap-southeast-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-north-1",
+ Region: "ap-southeast-1",
},
},
endpointKey{
- Region: "eu-south-1",
+ Region: "eu-central-1",
}: endpoint{
- Hostname: "oidc.eu-south-1.amazonaws.com",
+ Hostname: "omics.eu-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-south-1",
+ Region: "eu-central-1",
},
},
endpointKey{
Region: "eu-west-1",
}: endpoint{
- Hostname: "oidc.eu-west-1.amazonaws.com",
+ Hostname: "omics.eu-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-1",
},
@@ -16359,55 +23078,67 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{
- Hostname: "oidc.eu-west-2.amazonaws.com",
+ Hostname: "omics.eu-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "eu-west-2",
},
},
endpointKey{
- Region: "eu-west-3",
+ Region: "fips-us-east-1",
}: endpoint{
- Hostname: "oidc.eu-west-3.amazonaws.com",
+ Hostname: "omics-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "eu-west-3",
+ Region: "us-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "me-south-1",
+ Region: "fips-us-west-2",
}: endpoint{
- Hostname: "oidc.me-south-1.amazonaws.com",
+ Hostname: "omics-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
- Region: "me-south-1",
+ Region: "us-west-2",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "sa-east-1",
+ Region: "il-central-1",
}: endpoint{
- Hostname: "oidc.sa-east-1.amazonaws.com",
+ Hostname: "omics.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "sa-east-1",
+ Region: "il-central-1",
},
},
endpointKey{
Region: "us-east-1",
}: endpoint{
- Hostname: "oidc.us-east-1.amazonaws.com",
+ Hostname: "omics.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
endpointKey{
- Region: "us-east-2",
+ Region: "us-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "oidc.us-east-2.amazonaws.com",
+ Hostname: "omics-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-east-2",
+ Region: "us-east-1",
},
},
endpointKey{
Region: "us-west-2",
}: endpoint{
- Hostname: "oidc.us-west-2.amazonaws.com",
+ Hostname: "omics.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "omics-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -16526,6 +23257,55 @@ var awsPartition = partition{
},
},
},
+ "osis": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -16562,39 +23342,526 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "participant.connect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "personalize": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "pi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.af-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-3.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-south-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-3.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-4.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ca-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-central-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ca-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-central-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-north-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Protocols: []string{"https"},
},
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
+ Region: "eu-south-2",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-south-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-3",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-3.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
- Hostname: "outposts-fips.ca-central-1.amazonaws.com",
+ Hostname: "pi-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ Hostname: "pi-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -16603,7 +23870,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ Hostname: "pi-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -16612,7 +23879,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ Hostname: "pi-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -16621,67 +23888,184 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ Hostname: "pi-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.il-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.me-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "me-south-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.me-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "sa-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.sa-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "outposts-fips.us-east-1.amazonaws.com",
+ Hostname: "pi-fips.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-1.api.aws",
+ Protocols: []string{"https"},
},
endpointKey{
Region: "us-east-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-east-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "outposts-fips.us-east-2.amazonaws.com",
+ Hostname: "pi-fips.us-east-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-2.api.aws",
+ Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "outposts-fips.us-west-1.amazonaws.com",
+ Hostname: "pi-fips.us-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-1.api.aws",
+ Protocols: []string{"https"},
},
endpointKey{
Region: "us-west-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-west-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "outposts-fips.us-west-2.amazonaws.com",
+ Hostname: "pi-fips.us-west-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-2.api.aws",
+ Protocols: []string{"https"},
},
},
},
- "participant.connect": service{
+ "pinpoint": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ },
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -16690,26 +24074,61 @@ var awsPartition = partition{
}: endpoint{},
endpointKey{
Region: "ca-central-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "pinpoint.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pinpoint-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "pinpoint-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
+ Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "pinpoint-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
+ Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -16717,62 +24136,58 @@ var awsPartition = partition{
},
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "pinpoint.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
+ Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
},
endpointKey{
- Region: "us-west-2",
- }: endpoint{},
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "pinpoint.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
endpointKey{
- Region: "us-west-2",
+ Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
+ Hostname: "pinpoint-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
},
- },
- },
- "personalize": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
endpointKey{
Region: "us-west-2",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "pinpoint.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
- "pi": service{
+ "pipes": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -16792,6 +24207,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -16807,12 +24225,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -16845,96 +24269,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "pinpoint": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "mobiletargeting",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "pinpoint.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "pinpoint.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
"polly": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -16949,6 +24283,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -17058,6 +24395,14 @@ var awsPartition = partition{
},
"portal.sso": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "portal.sso.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{
@@ -17098,6 +24443,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "portal.sso.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -17114,6 +24467,22 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -17122,6 +24491,14 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "portal.sso.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -17130,6 +24507,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "portal.sso.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -17146,6 +24531,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "portal.sso.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -17170,6 +24563,22 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "portal.sso.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "portal.sso.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -17202,6 +24611,14 @@ var awsPartition = partition{
Region: "us-east-2",
},
},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "portal.sso.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
@@ -17212,6 +24629,19 @@ var awsPartition = partition{
},
},
},
+ "private-networks": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"profile": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -17232,18 +24662,63 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "profile-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "profile-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "profile-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "profile-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "profile-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "profile-fips.us-west-2.amazonaws.com",
+ },
},
},
"projects.iot1click": service{
@@ -17276,9 +24751,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -17290,6 +24783,166 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "qbusiness.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "qbusiness.us-west-2.api.aws",
+ },
+ },
+ },
"qldb": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -17389,6 +25042,9 @@ var awsPartition = partition{
},
"quicksight": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -17404,6 +25060,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "api",
}: endpoint{},
@@ -17413,12 +25072,24 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -17453,6 +25124,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -17462,6 +25136,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -17471,15 +25148,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ram-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -17498,6 +25190,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ram-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -17534,6 +25235,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -17598,12 +25305,21 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -17613,15 +25329,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "rbin-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -17640,6 +25371,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -17676,6 +25416,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -17743,6 +25486,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -17752,6 +25498,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -17770,15 +25519,39 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -17788,6 +25561,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -17803,6 +25579,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "rds-fips.ca-west-1",
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "rds-fips.us-east-1",
}: endpoint{
@@ -17857,6 +25642,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "rds.ca-west-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "rds.ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "rds.us-east-1",
}: endpoint{
@@ -18135,6 +25938,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -18144,6 +25950,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18153,15 +25962,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "redshift-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18180,6 +26004,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -18216,6 +26049,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -18271,33 +26107,132 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
+ },
},
},
"rekognition": service{
@@ -18344,6 +26279,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "rekognition-fips.ca-central-1",
}: endpoint{
@@ -18617,6 +26555,97 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "resource-explorer-2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -18637,6 +26666,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -18646,18 +26678,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18703,6 +26747,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -18792,6 +26842,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -18801,18 +26854,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18822,6 +26887,48 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -18831,15 +26938,39 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+ },
},
},
"route53": service{
@@ -18918,6 +27049,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -18927,18 +27061,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18948,6 +27094,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -18970,33 +27122,81 @@ var awsPartition = partition{
},
"rum": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -19140,6 +27340,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -19149,18 +27352,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -19168,7 +27383,13 @@ var awsPartition = partition{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
@@ -19334,6 +27555,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3.dualstack.ap-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.ap-south-2.amazonaws.com",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -19369,6 +27599,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+ },
endpointKey{
Region: "aws-global",
}: endpoint{
@@ -19399,6 +27638,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19408,6 +27668,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3.dualstack.eu-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.eu-central-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -19426,6 +27695,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3.dualstack.eu-south-1.amazonaws.com",
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.eu-south-2.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -19466,6 +27744,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -19502,6 +27789,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.il-central-1.amazonaws.com",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -19670,6 +27966,44 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "s3-control.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
@@ -19746,6 +28080,25 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "s3-control.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -19784,6 +28137,44 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -19833,6 +28224,55 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "s3-control.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -19852,6 +28292,25 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -19871,6 +28330,44 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -19928,6 +28425,63 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "s3-control.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "s3-control.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "s3-control.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{
@@ -20146,6 +28700,262 @@ var awsPartition = partition{
},
},
"s3-outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ },
+ },
+ "sagemaker-geospatial": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "savingsplans": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "savingsplans.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "scheduler": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -20165,21 +28975,29 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
@@ -20187,6 +29005,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20197,35 +29018,8 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -20235,49 +29029,22 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "savingsplans": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "savingsplans.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
},
},
"schemas": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -20287,6 +29054,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -20296,15 +29066,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20314,6 +29096,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -20372,145 +29160,288 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
+
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ca-central-1-fips",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
+
Deprecated: boxedTrue,
},
},
@@ -20535,6 +29466,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -20544,18 +29478,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20601,6 +29547,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -20645,6 +29597,121 @@ var awsPartition = partition{
},
},
},
+ "securitylake": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"serverlessrepo": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -20727,21 +29794,85 @@ var awsPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"servicecatalog": service{
@@ -20764,6 +29895,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -20773,18 +29907,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20794,6 +29937,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -20894,6 +30043,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -20903,6 +30055,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -20912,15 +30067,24 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -20975,6 +30139,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -21025,38 +30195,122 @@ var awsPartition = partition{
Region: "af-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-northeast-3.api.aws",
+ },
endpointKey{
- Region: "ap-northeast-1",
+ Region: "ap-south-1",
}: endpoint{},
endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-south-1.api.aws",
+ },
endpointKey{
- Region: "ap-northeast-3",
+ Region: "ap-south-2",
}: endpoint{},
endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1-fips",
}: endpoint{
@@ -21066,66 +30320,165 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-central-2.api.aws",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-north-1.api.aws",
+ },
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-south-2.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-2.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
}: endpoint{},
endpointKey{
- Region: "servicediscovery",
+ Region: "me-central-1",
+ Variant: dualStackVariant,
}: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "servicediscovery.me-central-1.api.aws",
},
endpointKey{
- Region: "servicediscovery",
- Variant: fipsVariant,
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "servicediscovery.me-south-1.api.aws",
},
endpointKey{
- Region: "servicediscovery-fips",
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
}: endpoint{
- Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "servicediscovery.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1-fips",
}: endpoint{
@@ -21138,12 +30491,24 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2-fips",
}: endpoint{
@@ -21156,12 +30521,24 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1-fips",
}: endpoint{
@@ -21174,12 +30551,24 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2-fips",
}: endpoint{
@@ -21216,6 +30605,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -21225,18 +30617,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -21246,6 +30650,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -21386,7 +30796,7 @@ var awsPartition = partition{
},
},
},
- "sms": service{
+ "signer": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -21433,7 +30843,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "sms-fips.us-east-1.amazonaws.com",
+ Hostname: "signer-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -21442,7 +30852,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "sms-fips.us-east-2.amazonaws.com",
+ Hostname: "signer-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -21451,7 +30861,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "sms-fips.us-west-1.amazonaws.com",
+ Hostname: "signer-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -21460,12 +30870,44 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "sms-fips.us-west-2.amazonaws.com",
+ Hostname: "signer-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-verification-us-east-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-east-2",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-west-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-west-2",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -21479,7 +30921,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "sms-fips.us-east-1.amazonaws.com",
+ Hostname: "signer-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -21488,7 +30930,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "sms-fips.us-east-2.amazonaws.com",
+ Hostname: "signer-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -21497,7 +30939,217 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "sms-fips.us-west-1.amazonaws.com",
+ Hostname: "signer-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "signer-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "verification-af-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-northeast-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-northeast-2",
+ }: endpoint{
+ Hostname: "verification.signer.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-southeast-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-southeast-2",
+ }: endpoint{
+ Hostname: "verification.signer.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-ca-central-1",
+ }: endpoint{
+ Hostname: "verification.signer.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-central-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-north-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-2",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-3",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "verification-me-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-sa-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-east-2",
+ }: endpoint{
+ Hostname: "verification.signer.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-west-2",
+ }: endpoint{
+ Hostname: "verification.signer.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "simspaceweaver": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sms-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-west-2",
@@ -21512,36 +31164,165 @@ var awsPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
+ },
},
},
"snowball": service{
@@ -21804,6 +31585,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -21876,6 +31663,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -21885,18 +31675,36 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sns-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -21906,6 +31714,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "sns-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -21942,6 +31759,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22015,6 +31835,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22024,18 +31847,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22081,6 +31916,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22151,6 +31989,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22160,6 +32001,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -22169,15 +32013,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ssm-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22196,6 +32055,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -22232,6 +32100,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22279,6 +32150,118 @@ var awsPartition = partition{
},
},
},
+ "ssm-contacts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"ssm-incidents": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -22299,6 +32282,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -22314,25 +32303,242 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "ssm-sap": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
+ },
},
},
"sso": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -22348,24 +32554,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22375,6 +32599,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22387,6 +32617,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -22412,6 +32645,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22421,18 +32657,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22478,6 +32726,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22545,6 +32796,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22554,6 +32808,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -22572,15 +32829,39 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22591,14 +32872,11 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -22707,6 +32985,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22716,18 +32997,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22737,6 +33030,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "local",
}: endpoint{
@@ -22790,6 +33086,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22799,6 +33098,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "aws-global",
}: endpoint{
@@ -22810,15 +33112,24 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22828,6 +33139,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -22957,6 +33271,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -22966,18 +33283,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -23023,6 +33352,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -23090,6 +33422,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -23099,18 +33434,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -23156,6 +33503,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -23223,6 +33573,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -23232,18 +33585,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -23253,6 +33618,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -23276,41 +33644,115 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "tax": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "tax.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"textract": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-northeast-2.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-south-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-southeast-2.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.ca-central-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-central-1.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-2.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-3.api.aws",
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -23359,39 +33801,146 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-east-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-west-2.api.aws",
+ },
+ },
+ },
+ "thinclient": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "tnb": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
},
},
"transcribe": service{
@@ -23543,12 +34092,21 @@ var awsPartition = partition{
},
"transcribestreaming": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
@@ -23706,12 +34264,21 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -23721,15 +34288,30 @@ var awsPartition = partition{
}: endpoint{
Hostname: "transfer-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -23748,6 +34330,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -23784,6 +34375,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -23910,6 +34507,21 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -23930,48 +34542,388 @@ var awsPartition = partition{
},
},
},
- "voiceid": service{
+ "verifiedpermissions": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
-
+ Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
-
+ Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "voice-chime": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "voiceid": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voiceid-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "voiceid-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "voiceid-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "voiceid-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voiceid-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "voiceid-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "vpc-lattice": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
}: endpoint{},
},
},
@@ -24138,6 +35090,23 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "waf-regional.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -24189,6 +35158,23 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "waf-regional.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -24223,6 +35209,23 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "waf-regional.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -24257,6 +35260,23 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "waf-regional.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -24362,6 +35382,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-south-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ap-southeast-1",
}: endpoint{
@@ -24389,6 +35418,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-southeast-4",
+ }: endpoint{
+ Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -24407,6 +35445,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-north-1",
}: endpoint{
@@ -24425,6 +35472,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-west-1",
}: endpoint{
@@ -24452,6 +35508,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-il-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -24506,6 +35580,40 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -24714,6 +35822,23 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "wafv2.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -24765,6 +35890,23 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "wafv2.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -24782,6 +35924,23 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -24799,6 +35958,23 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "wafv2.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -24833,6 +36009,23 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "wafv2.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -24938,6 +36131,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-south-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ap-southeast-1",
}: endpoint{
@@ -24965,6 +36167,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ap-southeast-4",
+ }: endpoint{
+ Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -24974,6 +36185,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -24983,6 +36203,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-central-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-north-1",
}: endpoint{
@@ -25001,6 +36230,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-eu-south-2",
+ }: endpoint{
+ Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-west-1",
}: endpoint{
@@ -25028,6 +36266,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-il-central-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -25082,6 +36338,40 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "wafv2.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "wafv2.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -25249,21 +36539,77 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "ui-ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-us-west-2",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
},
},
"workdocs": service{
@@ -25386,6 +36732,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -25463,6 +36812,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -25472,18 +36824,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -25529,6 +36893,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -25668,6 +37035,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "acm-pca": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "airflow": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"api.ecr": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -25688,6 +37080,20 @@ var awscnPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -25795,14 +37201,36 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"autoscaling": service{
@@ -25898,9 +37326,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"cloudformation": service{
@@ -26034,7 +37474,10 @@ var awscnPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
@@ -26060,6 +37503,41 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"dax": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26187,6 +37665,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"elasticache": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26272,9 +37775,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"emr-containers": service{
@@ -26287,14 +37802,49 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "emr-serverless": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"events": service{
@@ -26452,14 +38002,52 @@ var awscnPartition = partition{
},
},
},
- "iot": service{
+ "identitystore": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "inspector2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "execute-api",
- },
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
+ },
+ "iot": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
@@ -26512,6 +38100,29 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "iottwinmaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "api-cn-north-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "data-cn-north-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26522,6 +38133,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"kinesis": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26542,6 +38178,13 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "kinesisvideo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
"kms": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26594,6 +38237,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "license-manager-linux-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"logs": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26609,7 +38262,7 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
- Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
@@ -26626,6 +38279,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -26671,6 +38334,46 @@ var awscnPartition = partition{
},
},
},
+ "network-firewall": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "oam": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
+ "oidc": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "oidc.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"organizations": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -26693,6 +38396,34 @@ var awscnPartition = partition{
},
},
"pi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "pipes": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
@@ -26709,6 +38440,58 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "portal.sso": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
+ "quicksight": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
"ram": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26719,6 +38502,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "rbin": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"rds": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26739,6 +38532,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "redshift-serverless": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -26749,6 +38552,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "rolesanywhere": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"route53": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -26880,14 +38693,53 @@ var awscnPartition = partition{
},
},
},
+ "savingsplans": service{
+ IsRegionalized: boxedTrue,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "schemas": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"secretsmanager": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
},
},
"securityhub": service{
@@ -26934,12 +38786,29 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
- "sms": service{
+ "servicequotas": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
@@ -26949,6 +38818,39 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "signer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "verification-cn-north-1",
+ }: endpoint{
+ Hostname: "verification.signer.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-cn-northwest-1",
+ }: endpoint{
+ Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
"snowball": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -27030,14 +38932,36 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "sso": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"states": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "states.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"storagegateway": service{
@@ -27363,6 +39287,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -27371,6 +39313,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"acm": service{
@@ -27625,6 +39585,9 @@ var awsusgovPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -27772,12 +39735,42 @@ var awsusgovPartition = partition{
},
"appconfigdata": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+ },
},
},
"application-autoscaling": service{
@@ -27794,13 +39787,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
+ Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
+ Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
+ },
},
},
"applicationinsights": service{
@@ -27842,6 +39867,24 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -27862,6 +39905,16 @@ var awsusgovPartition = partition{
},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -27885,21 +39938,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "athena-fips.us-gov-west-1.api.aws",
+ },
},
},
"autoscaling": service{
@@ -27933,13 +40010,37 @@ var awsusgovPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
},
},
},
@@ -28011,6 +40112,45 @@ var awsusgovPartition = partition{
},
},
},
+ "bedrock": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "bedrock-fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"cassandra": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28021,6 +40161,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cassandra.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "cassandra.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -28029,6 +40187,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cassandra.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "cassandra.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudcontrolapi": service{
@@ -28054,21 +40230,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws",
+ },
},
},
"clouddirectory": service{
@@ -28076,6 +40276,21 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudformation": service{
@@ -28088,6 +40303,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -28096,6 +40329,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"cloudhsm": service{
@@ -28123,6 +40374,14 @@ var awsusgovPartition = partition{
},
},
"cloudtrail": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
@@ -28293,6 +40552,15 @@ var awsusgovPartition = partition{
},
"codepipeline": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -28302,6 +40570,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -28313,6 +40590,13 @@ var awsusgovPartition = partition{
},
},
},
+ "codestar-connections": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ },
+ },
"cognito-identity": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28406,6 +40690,26 @@ var awsusgovPartition = partition{
},
},
},
+ "compute-optimizer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"config": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -28456,9 +40760,64 @@ var awsusgovPartition = partition{
},
"connect": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "connect.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "connect.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "controltower": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"data-ats.iot": service{
@@ -28551,9 +40910,24 @@ var awsusgovPartition = partition{
},
"databrew": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "databrew.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "databrew.us-gov-west-1.amazonaws.com",
+ },
},
},
"datasync": service{
@@ -28596,23 +40970,68 @@ var awsusgovPartition = partition{
},
},
},
- "directconnect": service{
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
- Hostname: "directconnect.us-gov-east-1.amazonaws.com",
+ Hostname: "datazone.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "datazone.us-gov-west-1.api.aws",
+ },
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "directconnect.us-gov-west-1.amazonaws.com",
+ Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
},
},
},
@@ -28621,9 +41040,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dlm.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "dlm.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dlm.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "dlm.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"dms": service{
@@ -28713,6 +41162,46 @@ var awsusgovPartition = partition{
},
},
},
+ "drs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"ds": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -28829,6 +41318,15 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.us-gov-east-1.api.aws",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -28837,6 +41335,15 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "ec2.us-gov-west-1.api.aws",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"ecs": service{
@@ -28930,6 +41437,31 @@ var awsusgovPartition = partition{
},
},
},
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-gov-west-1.api.aws",
+ },
+ },
+ },
"elasticache": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -28982,6 +41514,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -28990,6 +41540,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"elasticfilesystem": service{
@@ -29114,6 +41682,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@@ -29125,6 +41699,13 @@ var awsusgovPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@@ -29136,6 +41717,15 @@ var awsusgovPartition = partition{
},
"email": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -29145,6 +41735,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -29156,6 +41755,86 @@ var awsusgovPartition = partition{
},
},
},
+ "emr-containers": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "emr-serverless": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -29170,6 +41849,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@@ -29188,6 +41873,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@@ -29424,24 +42115,68 @@ var awsusgovPartition = partition{
},
},
},
+ "geo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "glacier.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "glacier.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "glacier.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "glacier.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
},
},
},
@@ -29468,21 +42203,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "glue.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "glue.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-west-1.api.aws",
+ },
},
},
"greengrass": service{
@@ -29512,36 +42271,38 @@ var awsusgovPartition = partition{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
- Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "greengrass.us-gov-east-1.amazonaws.com",
+ Hostname: "greengrass.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
- Region: "us-gov-east-1",
+ Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
+ Hostname: "greengrass.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "greengrass.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
},
},
},
@@ -29598,7 +42359,21 @@ var awsusgovPartition = partition{
},
},
"health": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "health.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "global.health.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -29667,19 +42442,89 @@ var awsusgovPartition = partition{
Deprecated: boxedTrue,
},
endpointKey{
- Region: "iam-govcloud",
+ Region: "iam-govcloud",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "iam-govcloud-fips",
+ }: endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "identitystore": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{},
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "identitystore.{region}.{dnsSuffix}",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "identitystore.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "identitystore.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "identitystore.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "identitystore.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "ingest.timestream": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
},
endpointKey{
- Region: "iam-govcloud-fips",
+ Region: "us-gov-west-1-fips",
}: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
+ Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -29687,20 +42532,12 @@ var awsusgovPartition = partition{
},
},
},
- "identitystore": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identitystore.{region}.{dnsSuffix}",
- },
- },
+ "inspector": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
- Hostname: "identitystore.us-gov-east-1.amazonaws.com",
+ Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
@@ -29709,7 +42546,7 @@ var awsusgovPartition = partition{
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "identitystore.us-gov-west-1.amazonaws.com",
+ Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -29722,7 +42559,7 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "identitystore.us-gov-east-1.amazonaws.com",
+ Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
@@ -29731,16 +42568,16 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "identitystore.us-gov-west-1.amazonaws.com",
+ Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
},
},
},
- "inspector": service{
+ "inspector2": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
- Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
@@ -29749,7 +42586,7 @@ var awsusgovPartition = partition{
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -29762,7 +42599,7 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
@@ -29771,35 +42608,49 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com",
},
},
},
- "iot": service{
+ "internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "execute-api",
- },
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "internetmonitor.us-gov-west-1.api.aws",
},
},
+ },
+ "iot": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "iot-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "execute-api",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "iot-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "execute-api",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
@@ -29944,14 +42795,114 @@ var awsusgovPartition = partition{
},
},
},
+ "iottwinmaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "api-us-gov-west-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "data-us-gov-west-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-api-us-gov-west-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-data-us-gov-west-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kafka.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "kafka.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "kafka.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "kafka.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"kendra": service{
@@ -29976,8 +42927,51 @@ var awsusgovPartition = partition{
},
},
},
+ "kendra-ranking": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.us-gov-west-1.api.aws",
+ },
+ },
+ },
"kinesis": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
@@ -29986,6 +42980,15 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesis.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -29994,6 +42997,15 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesis.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
},
},
"kinesisanalytics": service{
@@ -30006,6 +43018,62 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "kinesisvideo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"kms": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -30078,21 +43146,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-gov-west-1.api.aws",
+ },
},
},
"lambda": service{
@@ -30187,6 +43279,26 @@ var awsusgovPartition = partition{
},
},
},
+ "license-manager-linux-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "license-manager-user-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"logs": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -30227,6 +43339,36 @@ var awsusgovPartition = partition{
},
},
},
+ "m2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ },
+ },
"managedblockchain": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -30237,12 +43379,22 @@ var awsusgovPartition = partition{
"mediaconvert": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
},
},
},
@@ -30303,6 +43455,63 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
+ "mgn": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "mgn-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "mgn-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "mgn-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "models-v2-lex": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"models.lex": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -30500,6 +43709,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "aws-us-gov-global",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "networkmanager.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-us-gov-global",
+ }: endpoint{
+ Hostname: "networkmanager.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"oidc": service{
@@ -30557,32 +43784,136 @@ var awsusgovPartition = partition{
"outposts": service{
Endpoints: serviceEndpoints{
endpointKey{
- Region: "us-gov-east-1",
+ Region: "fips-us-gov-east-1",
}: endpoint{
Hostname: "outposts.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "outposts.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "outposts.us-gov-west-1.amazonaws.com",
},
},
},
"participant.connect": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "pi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-gov-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
},
},
},
@@ -30665,6 +43996,31 @@ var awsusgovPartition = partition{
},
},
},
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-gov-west-1.api.aws",
+ },
+ },
+ },
"quicksight": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -30685,6 +44041,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "ram.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -30693,6 +44067,64 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "ram.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "rbin": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-gov-west-1.amazonaws.com",
+ },
},
},
"rds": service{
@@ -30830,6 +44262,46 @@ var awsusgovPartition = partition{
},
},
},
+ "resiliencehub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"resource-groups": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -30885,6 +44357,46 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "rolesanywhere": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"route53": service{
PartitionEndpoint: "aws-us-gov-global",
IsRegionalized: boxedFalse,
@@ -30922,6 +44434,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
+
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "runtime-v2-lex": service{
+ Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -30974,6 +44519,9 @@ var awsusgovPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -31216,17 +44764,33 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
},
},
"secretsmanager": service{
@@ -31234,37 +44798,43 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
+
Deprecated: boxedTrue,
},
},
@@ -31309,6 +44879,46 @@ var awsusgovPartition = partition{
},
},
},
+ "securitylake": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "securitylake.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "securitylake.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"serverlessrepo": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -31318,21 +44928,45 @@ var awsusgovPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
},
},
},
@@ -31426,12 +45060,24 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
@@ -31444,12 +45090,24 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
@@ -31512,12 +45170,12 @@ var awsusgovPartition = partition{
},
},
},
- "sms": service{
+ "signer": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-east-1",
}: endpoint{
- Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
@@ -31526,7 +45184,79 @@ var awsusgovPartition = partition{
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
+ Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-verification-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "verification-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "simspaceweaver": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
@@ -31539,7 +45269,29 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "sms-fips.us-gov-east-1.amazonaws.com",
+ Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "sms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
@@ -31554,9 +45306,42 @@ var awsusgovPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
+ },
},
},
"snowball": service{
@@ -31631,14 +45416,14 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
- Protocols: []string{"http", "https"},
+ Protocols: []string{"https"},
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "sns.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
+ Protocols: []string{"https"},
},
},
},
@@ -31730,6 +45515,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sso.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "sso.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -31738,6 +45541,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sso.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "sso.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"states": service{
@@ -31982,6 +45803,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "swf.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -31990,6 +45829,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "swf.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"synthetics": service{
@@ -32065,21 +45922,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-gov-west-1.api.aws",
+ },
},
},
"transcribe": service{
@@ -32173,23 +46054,72 @@ var awsusgovPartition = partition{
Hostname: "transfer-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "translate": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "verifiedpermissions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "translate": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
},
- },
- Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -32197,16 +46127,7 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
},
},
},
@@ -32334,6 +46255,15 @@ var awsusgovPartition = partition{
},
"workspaces": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -32343,6 +46273,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -32456,6 +46395,20 @@ var awsisoPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32468,6 +46421,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"appconfig": service{
@@ -32505,6 +46461,23 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
+ "athena": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"autoscaling": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32517,6 +46490,16 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "cloudcontrolapi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
"cloudformation": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32576,6 +46559,46 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
+ },
+ },
+ },
"directconnect": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32586,6 +46609,16 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "dlm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
"dms": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -32666,6 +46699,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"dynamodb": service{
@@ -32685,6 +46721,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"ec2": service{
@@ -32717,6 +46756,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"elasticache": service{
@@ -32740,6 +46782,15 @@ var awsisoPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
@@ -32749,6 +46800,15 @@ var awsisoPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov",
},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov",
+ },
},
},
"elasticloadbalancing": service{
@@ -32765,14 +46825,45 @@ var awsisoPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
+ },
},
},
"es": service{
@@ -32800,6 +46891,58 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
+ "fsx": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-prod-us-iso-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-us-iso-east-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ },
},
},
"glacier": service{
@@ -32814,6 +46957,26 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "glue": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"health": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32909,6 +47072,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"logs": service{
@@ -32935,6 +47101,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32957,19 +47130,130 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
+ "rbin": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
+ },
},
},
"rds": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rds.us-iso-east-1",
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "rds.us-iso-west-1",
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-east-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Hostname: "redshift.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{
+ Hostname: "redshift.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ },
+ },
+ "resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
@@ -32998,6 +47282,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"runtime.sagemaker": service{
@@ -33014,14 +47301,185 @@ var awsisoPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"http", "https"},
SignatureVersions: []string{"s3v4"},
},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-west-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "s3-outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
}: endpoint{},
},
},
@@ -33030,6 +47488,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"snowball": service{
@@ -33037,6 +47498,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"sns": service{
@@ -33068,6 +47532,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"states": service{
@@ -33135,9 +47602,22 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"tagging": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
+ "textract": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
@@ -33180,6 +47660,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
},
@@ -33233,6 +47716,34 @@ var awsisobPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"appconfig": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33259,6 +47770,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"autoscaling": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -33271,6 +47789,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "cloudcontrolapi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"cloudformation": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33306,6 +47831,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "dlm": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"dms": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -33460,9 +47992,24 @@ var awsisobPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
+ },
},
},
"es": service{
@@ -33479,6 +48026,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33566,6 +48120,20 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "medialive": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "mediapackage": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"metering.marketplace": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -33580,6 +48148,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "metrics.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"monitoring": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33587,6 +48162,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"ram": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33594,14 +48176,72 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "rbin": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+ Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ },
+ },
"rds": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rds.us-isob-east-1",
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"redshift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "resource-groups": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
@@ -33622,6 +48262,20 @@ var awsisobPartition = partition{
},
},
},
+ "route53resolver": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"s3": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -33629,6 +48283,110 @@ var awsisobPartition = partition{
SignatureVersions: []string{"s3v4"},
},
},
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "s3-outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ },
+ },
+ "secretsmanager": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
@@ -33681,6 +48439,37 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "storagegateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"streams.dynamodb": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -33746,3 +48535,75 @@ var awsisobPartition = partition{
},
},
}
+
+// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe).
+func AwsIsoEPartition() Partition {
+ return awsisoePartition.Partition()
+}
+
+var awsisoePartition = partition{
+ ID: "aws-iso-e",
+ Name: "AWS ISOE (Europe)",
+ DNSSuffix: "cloud.adc-e.uk",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{
+ "eu-isoe-west-1": region{
+ Description: "EU ISOE West",
+ },
+ },
+ Services: services{},
+}
+
+// AwsIsoFPartition returns the Resolver for AWS ISOF.
+func AwsIsoFPartition() Partition {
+ return awsisofPartition.Partition()
+}
+
+var awsisofPartition = partition{
+ ID: "aws-iso-f",
+ Name: "AWS ISOF",
+ DNSSuffix: "csp.hci.ic.gov",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ Regions: regions{},
+ Services: services{},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
index 4601f883c..992ed0464 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro
s := a.Expected.(int)
result = s == req.HTTPResponse.StatusCode
case ErrorWaiterMatch:
- if aerr, ok := err.(awserr.Error); ok {
- result = aerr.Code() == a.Expected.(string)
+ switch ex := a.Expected.(type) {
+ case string:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == ex
+ }
+ case bool:
+ if ex {
+ result = err != nil
+ } else {
+ result = err == nil
+ }
}
default:
waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
index 1d3f4c3ad..ea8e35376 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -14,6 +14,7 @@ import (
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
+ "github.com/aws/aws-sdk-go/service/ssooidc"
"github.com/aws/aws-sdk-go/service/sts"
)
@@ -23,6 +24,10 @@ type CredentialsProviderOptions struct {
// WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider,
// such as setting its ExpiryWindow.
WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider)
+
+ // ProcessProviderOptions configures a ProcessProvider,
+ // such as setting its Timeout.
+ ProcessProviderOptions func(*processcreds.ProcessProvider)
}
func resolveCredentials(cfg *aws.Config,
@@ -33,7 +38,7 @@ func resolveCredentials(cfg *aws.Config,
switch {
case len(sessOpts.Profile) != 0:
- // User explicitly provided an Profile in the session's configuration
+ // User explicitly provided a Profile in the session's configuration
// so load that profile from shared config first.
// Github(aws/aws-sdk-go#2727)
return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
@@ -134,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config,
case len(sharedCfg.CredentialProcess) != 0:
// Get credentials from CredentialProcess
- creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+ var optFns []func(*processcreds.ProcessProvider)
+ if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil {
+ optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions)
+ }
+ creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...)
default:
// Fallback to default credentials provider, include mock errors for
@@ -173,8 +182,28 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
return nil, err
}
+ var optFns []func(provider *ssocreds.Provider)
cfgCopy := cfg.Copy()
- cfgCopy.Region = &sharedCfg.SSORegion
+
+ if sharedCfg.SSOSession != nil {
+ cfgCopy.Region = &sharedCfg.SSOSession.SSORegion
+ cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name)
+ if err != nil {
+ return nil, err
+ }
+ // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
+ mySession := Must(NewSession(&aws.Config{
+ Credentials: credentials.AnonymousCredentials,
+ }))
+ oidcClient := ssooidc.New(mySession, cfgCopy)
+ tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
+ optFns = append(optFns, func(p *ssocreds.Provider) {
+ p.TokenProvider = tokenProvider
+ p.CachedTokenFilepath = cachedPath
+ })
+ } else {
+ cfgCopy.Region = &sharedCfg.SSORegion
+ }
return ssocreds.NewCredentials(
&Session{
@@ -184,6 +213,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
sharedCfg.SSOAccountID,
sharedCfg.SSORoleName,
sharedCfg.SSOStartURL,
+ optFns...,
), nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d6fa24776..93bb5de64 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -171,6 +171,12 @@ type envConfig struct {
// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // AWS_EC2_METADATA_V1_DISABLED=true
+ EC2IMDSv1Disabled *bool
+
// Specifies that SDK clients must resolve a dual-stack endpoint for
// services.
//
@@ -251,6 +257,9 @@ var (
ec2IMDSEndpointModeEnvKey = []string{
"AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
}
+ ec2MetadataV1DisabledEnvKey = []string{
+ "AWS_EC2_METADATA_V1_DISABLED",
+ }
useCABundleKey = []string{
"AWS_CA_BUNDLE",
}
@@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
return envConfig{}, err
}
+ setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey)
if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil {
return cfg, err
@@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) {
}
}
+func setBoolPtrFromEnvVal(dst **bool, keys []string) {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch {
+ case strings.EqualFold(value, "false"):
+ *dst = new(bool)
+ **dst = false
+ case strings.EqualFold(value, "true"):
+ *dst = new(bool)
+ **dst = true
+ }
+ }
+}
+
func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 4293dbe10..3c88dee52 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -37,7 +37,7 @@ const (
// ErrSharedConfigSourceCollision will be returned if a section contains both
// source_profile and credential_source
-var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil)
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil)
// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
// variables are empty and Environment was set as the credential source
@@ -174,7 +174,6 @@ const (
// Options provides the means to control how a Session is created and what
// configuration values will be loaded.
-//
type Options struct {
// Provides config values for the SDK to use when creating service clients
// and making API requests to services. Any value set in with this field
@@ -224,7 +223,7 @@ type Options struct {
// from stdin for the MFA token code.
//
// This field is only used if the shared configuration is enabled, and
- // the config enables assume role wit MFA via the mfa_serial field.
+ // the config enables assume role with MFA via the mfa_serial field.
AssumeRoleTokenProvider func() (string, error)
// When the SDK's shared config is configured to assume a role this option
@@ -322,24 +321,24 @@ type Options struct {
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
-// // Equivalent to session.New
-// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
//
-// // Specify profile to load for the session's config
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Profile: "profile_name",
-// }))
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
//
-// // Specify profile for config and region for requests
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Config: aws.Config{Region: aws.String("us-east-1")},
-// Profile: "profile_name",
-// }))
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
//
-// // Force enable Shared Config support
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// SharedConfigState: session.SharedConfigEnable,
-// }))
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
var err error
@@ -375,7 +374,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
// This helper is intended to be used in variable initialization to load the
// Session and configuration at startup. Such as:
//
-// var sess = session.Must(session.NewSession())
+// var sess = session.Must(session.NewSession())
func Must(sess *Session, err error) *Session {
if err != nil {
panic(err)
@@ -780,14 +779,12 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
}
- // Configure credentials if not already set by the user when creating the
- // Session.
- if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
- creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
- if err != nil {
- return err
- }
- cfg.Credentials = creds
+ cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback
+ if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil {
+ cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled)
+ }
+ if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil {
+ cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled)
}
cfg.S3UseARNRegion = userCfg.S3UseARNRegion
@@ -812,6 +809,17 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
}
}
+ // Configure credentials if not already set by the user when creating the Session.
+ // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers.
+ // ticket: P83606045
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+ creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ if err != nil {
+ return err
+ }
+ cfg.Credentials = creds
+ }
+
return nil
}
@@ -845,8 +853,8 @@ func initHandlers(s *Session) {
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
//
-// // Create a copy of the current Session, configured for the us-west-2 region.
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 424c82b4d..2945185b0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -26,6 +26,13 @@ const (
roleSessionNameKey = `role_session_name` // optional
roleDurationSecondsKey = "duration_seconds" // optional
+ // Prefix to be used for SSO sections. These are supposed to only exist in
+ // the shared config file, not the credentials file.
+ ssoSectionPrefix = `sso-session `
+
+ // AWS Single Sign-On (AWS SSO) group
+ ssoSessionNameKey = "sso_session"
+
// AWS Single Sign-On (AWS SSO) group
ssoAccountIDKey = "sso_account_id"
ssoRegionKey = "sso_region"
@@ -73,6 +80,9 @@ const (
// EC2 IMDS Endpoint
ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+ // ECS IMDSv1 disable fallback
+ ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
// Use DualStack Endpoint Resolution
useDualStackEndpoint = "use_dualstack_endpoint"
@@ -99,6 +109,10 @@ type sharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
+ // SSO session options
+ SSOSessionName string
+ SSOSession *ssoSession
+
SSOAccountID string
SSORegion string
SSORoleName string
@@ -168,6 +182,12 @@ type sharedConfig struct {
// ec2_metadata_service_endpoint=http://fd00:ec2::254
EC2IMDSEndpoint string
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // ec2_metadata_v1_disabled=true
+ EC2IMDSv1Disabled *bool
+
// Specifies that SDK clients must resolve a dual-stack endpoint for
// services.
//
@@ -186,6 +206,20 @@ type sharedConfigFile struct {
IniData ini.Sections
}
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type ssoSession struct {
+ Name string
+ SSORegion string
+ SSOStartURL string
+}
+
+func (s *ssoSession) setFromIniSection(section ini.Section) {
+ updateString(&s.Name, section, ssoSessionNameKey)
+ updateString(&s.SSORegion, section, ssoRegionKey)
+ updateString(&s.SSOStartURL, section, ssoStartURL)
+}
+
// loadSharedConfig retrieves the configuration from the list of files using
// the profile provided. The order the files are listed will determine
// precedence. Values in subsequent files will overwrite values defined in
@@ -266,13 +300,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
// profile only have credential provider options.
cfg.clearAssumeRoleOptions()
} else {
- // First time a profile has been seen, It must either be a assume role
- // credentials, or SSO. Assert if the credential type requires a role ARN,
- // the ARN is also set, or validate that the SSO configuration is complete.
+ // First time a profile has been seen. Assert if the credential type
+ // requires a role ARN, the ARN is also set
if err := cfg.validateCredentialsConfig(profile); err != nil {
return err
}
}
+
profiles[profile] = struct{}{}
if err := cfg.validateCredentialType(); err != nil {
@@ -308,6 +342,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
cfg.SourceProfile = srcCfg
}
+ // If the profile contains an SSO session parameter, the session MUST exist
+ // as a section in the config file. Load the SSO session using the name
+ // provided. If the session section is not found or incomplete an error
+ // will be returned.
+ if cfg.hasSSOTokenProviderConfiguration() {
+ skippedFiles = 0
+ for _, f := range files {
+ section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
+ if ok {
+ var ssoSession ssoSession
+ ssoSession.setFromIniSection(section)
+ ssoSession.Name = cfg.SSOSessionName
+ cfg.SSOSession = &ssoSession
+ break
+ }
+ skippedFiles++
+ }
+ if skippedFiles == len(files) {
+ // If all files were skipped because the sso session section is not found, return
+ // the sso section not found error.
+ return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName)
+ }
+ }
+
return nil
}
@@ -340,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.Region, section, regionKey)
updateString(&cfg.CustomCABundle, section, customCABundleKey)
+ // we're retaining a behavioral quirk with this field that existed before
+ // the removal of literal parsing for (aws-sdk-go-v2/#2276):
+ // - if the key is missing, the config field will not be set
+ // - if the key is set to a non-numeric, the config field will be set to 0
if section.Has(roleDurationSecondsKey) {
- d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+ var d time.Duration
+ if v, ok := section.Int(roleDurationSecondsKey); ok {
+ d = time.Duration(v) * time.Second
+ }
cfg.AssumeRoleDuration = &d
}
@@ -363,6 +428,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
cfg.S3UsEast1RegionalEndpoint = sre
}
+ // AWS Single Sign-On (AWS SSO)
+ // SSO session options
+ updateString(&cfg.SSOSessionName, section, ssoSessionNameKey)
+
// AWS Single Sign-On (AWS SSO)
updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
updateString(&cfg.SSORegion, section, ssoRegionKey)
@@ -374,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
ec2MetadataServiceEndpointModeKey, file.Filename, err)
}
updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+ updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint)
@@ -461,32 +531,20 @@ func (cfg *sharedConfig) validateCredentialType() error {
}
func (cfg *sharedConfig) validateSSOConfiguration() error {
- if !cfg.hasSSOConfiguration() {
+ if cfg.hasSSOTokenProviderConfiguration() {
+ err := cfg.validateSSOTokenProviderConfiguration()
+ if err != nil {
+ return err
+ }
return nil
}
- var missing []string
- if len(cfg.SSOAccountID) == 0 {
- missing = append(missing, ssoAccountIDKey)
- }
-
- if len(cfg.SSORegion) == 0 {
- missing = append(missing, ssoRegionKey)
- }
-
- if len(cfg.SSORoleName) == 0 {
- missing = append(missing, ssoRoleNameKey)
- }
-
- if len(cfg.SSOStartURL) == 0 {
- missing = append(missing, ssoStartURL)
- }
-
- if len(missing) > 0 {
- return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
- cfg.Profile, strings.Join(missing, ", "))
+ if cfg.hasLegacySSOConfiguration() {
+ err := cfg.validateLegacySSOConfiguration()
+ if err != nil {
+ return err
+ }
}
-
return nil
}
@@ -525,15 +583,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() {
}
func (cfg *sharedConfig) hasSSOConfiguration() bool {
- switch {
- case len(cfg.SSOAccountID) != 0:
- case len(cfg.SSORegion) != 0:
- case len(cfg.SSORoleName) != 0:
- case len(cfg.SSOStartURL) != 0:
- default:
- return false
+ return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration()
+}
+
+func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool {
+ return len(c.SSOSessionName) > 0
+}
+
+func (c *sharedConfig) hasLegacySSOConfiguration() bool {
+ return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
+}
+
+func (c *sharedConfig) validateSSOTokenProviderConfiguration() error {
+ var missing []string
+
+ if len(c.SSOSessionName) == 0 {
+ missing = append(missing, ssoSessionNameKey)
}
- return true
+
+ if c.SSOSession == nil {
+ missing = append(missing, ssoSectionPrefix)
+ } else {
+ if len(c.SSOSession.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSOSession.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURL)
+ }
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+
+ if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+ }
+
+ if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix)
+ }
+
+ return nil
+}
+
+func (c *sharedConfig) validateLegacySSOConfiguration() error {
+ var missing []string
+
+ if len(c.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURL)
+ }
+
+ if len(c.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(c.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+ return nil
}
func oneOrNone(bs ...bool) bool {
@@ -566,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
- *dst = section.Bool(key)
+
+ // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
+ *dst = v
}
// updateBoolPtr will only update the dst with the value in the section key,
@@ -575,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
+
+ // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
*dst = new(bool)
- **dst = section.Bool(key)
+ **dst = v
}
// SharedConfigLoadError is an error for the shared config file failed to load.
@@ -703,7 +828,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i
return
}
- if section.Bool(key) {
+ // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
*dst = endpoints.DualStackEndpointStateEnabled
} else {
*dst = endpoints.DualStackEndpointStateDisabled
@@ -719,7 +845,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section
return
}
- if section.Bool(key) {
+ // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
*dst = endpoints.FIPSEndpointStateEnabled
} else {
*dst = endpoints.FIPSEndpointStateDisabled
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 4d78162c0..b542df931 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -3,21 +3,21 @@
// Provides request signing for request that need to be signed with
// AWS V4 Signatures.
//
-// Standalone Signer
+// # Standalone Signer
//
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
-// you many need to use the URL.Opaque to define what the raw URI should be sent
+// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
//
-// "///"
+// "///"
//
-// // e.g.
-// "//example.com/some/path"
+// // e.g.
+// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
@@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Expected-Bucket-Owner": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
@@ -135,6 +136,7 @@ var requiredSignedHeaders = rules{
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Context": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@@ -695,7 +697,8 @@ func (ctx *signingCtx) buildBodyDigest() error {
includeSHA256Header := ctx.unsignedPayload ||
ctx.ServiceName == "s3" ||
ctx.ServiceName == "s3-object-lambda" ||
- ctx.ServiceName == "glacier"
+ ctx.ServiceName == "glacier" ||
+ ctx.ServiceName == "s3-outposts"
s3Presign := ctx.isPresign &&
(ctx.ServiceName == "s3" ||
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 7922995d0..7ab65bae7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.100"
+const SDKVersion = "1.55.6"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
index 34a481afb..b1b686086 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -154,11 +154,11 @@ func (v ValueType) String() string {
// ValueType enums
const (
NoneType = ValueType(iota)
- DecimalType
- IntegerType
+ DecimalType // deprecated
+ IntegerType // deprecated
StringType
QuotedStringType
- BoolType
+ BoolType // deprecated
)
// Value is a union container
@@ -166,9 +166,9 @@ type Value struct {
Type ValueType
raw []rune
- integer int64
- decimal float64
- boolean bool
+ integer int64 // deprecated
+ decimal float64 // deprecated
+ boolean bool // deprecated
str string
}
@@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) {
}
token = newToken(TokenLit, b[:n], QuotedStringType)
- } else if isNumberValue(b) {
- var base int
- base, n, err = getNumericalValue(b)
- if err != nil {
- return token, 0, err
- }
-
- value := b[:n]
- vType := IntegerType
- if contains(value, '.') || hasExponent(value) {
- vType = DecimalType
- }
- token = newToken(TokenLit, value, vType)
- token.base = base
- } else if isBoolValue(b) {
- n, err = getBoolValue(b)
-
- token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
@@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) {
}
// IntValue returns an integer value
-func (v Value) IntValue() int64 {
- return v.integer
+func (v Value) IntValue() (int64, bool) {
+ i, err := strconv.ParseInt(string(v.raw), 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return i, true
}
// FloatValue returns a float value
-func (v Value) FloatValue() float64 {
- return v.decimal
+func (v Value) FloatValue() (float64, bool) {
+ f, err := strconv.ParseFloat(string(v.raw), 64)
+ if err != nil {
+ return 0, false
+ }
+ return f, true
}
// BoolValue returns a bool value
-func (v Value) BoolValue() bool {
- return v.boolean
+func (v Value) BoolValue() (bool, bool) {
+ // we don't use ParseBool as it recognizes more than what we've
+ // historically supported
+ if isCaselessLitValue(runesTrue, v.raw) {
+ return true, true
+ } else if isCaselessLitValue(runesFalse, v.raw) {
+ return false, true
+ }
+ return false, false
}
func isTrimmable(r rune) bool {
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
index 081cf4334..1d08e138a 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) {
}
// Bool returns a bool value at k
-func (t Section) Bool(k string) bool {
+func (t Section) Bool(k string) (bool, bool) {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
-func (t Section) Int(k string) int64 {
+func (t Section) Int(k string) (int64, bool) {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
-func (t Section) Float64(k string) float64 {
+func (t Section) Float64(k string) (float64, bool) {
return t.values[k].FloatValue()
}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
index ebcbc2b40..34fea49ca 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -1,9 +1,8 @@
package shareddefaults
import (
- "os"
+ "os/user"
"path/filepath"
- "runtime"
)
// SharedCredentialsFilename returns the SDK's default file path
@@ -31,10 +30,17 @@ func SharedConfigFilename() string {
// UserHomeDir returns the home directory for the user the process is
// running under.
func UserHomeDir() string {
- if runtime.GOOS == "windows" { // Windows
- return os.Getenv("USERPROFILE")
+ var home string
+
+ home = userHomeDir()
+ if len(home) > 0 {
+ return home
+ }
+
+ currUser, _ := user.Current()
+ if currUser != nil {
+ home = currUser.HomeDir
}
- // *nix
- return os.Getenv("HOME")
+ return home
}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
new file mode 100644
index 000000000..eb298ae0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
@@ -0,0 +1,18 @@
+//go:build !go1.12
+// +build !go1.12
+
+package shareddefaults
+
+import (
+ "os"
+ "runtime"
+)
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
new file mode 100644
index 000000000..51541b508
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
@@ -0,0 +1,13 @@
+//go:build go1.12
+// +build go1.12
+
+package shareddefaults
+
+import (
+ "os"
+)
+
+func userHomeDir() string {
+ home, _ := os.UserHomeDir()
+ return home
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
index 2aec80661..12e814ddf 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -4,7 +4,6 @@ package jsonutil
import (
"bytes"
"encoding/base64"
- "encoding/json"
"fmt"
"math"
"reflect"
@@ -16,6 +15,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
+const (
+ floatNaN = "NaN"
+ floatInf = "Infinity"
+ floatNegInf = "-Infinity"
+)
+
var timeType = reflect.ValueOf(time.Time{}).Type()
var byteSliceType = reflect.ValueOf([]byte{}).Type()
@@ -211,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro
buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
case reflect.Float64:
f := value.Float()
- if math.IsInf(f, 0) || math.IsNaN(f) {
- return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+ switch {
+ case math.IsNaN(f):
+ writeString(floatNaN, buf)
+ case math.IsInf(f, 1):
+ writeString(floatInf, buf)
+ case math.IsInf(f, -1):
+ writeString(floatNegInf, buf)
+ default:
+ buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
}
- buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
default:
switch converted := value.Interface().(type) {
case time.Time:
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
index 8b2c9bbeb..f9334879b 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
+ "math"
"math/big"
"reflect"
"strings"
@@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
return err
}
value.Set(reflect.ValueOf(v))
+ case *float64:
+ // These are regular strings when parsed by encoding/json's unmarshaler.
+ switch {
+ case strings.EqualFold(d, floatNaN):
+ value.Set(reflect.ValueOf(aws.Float64(math.NaN())))
+ case strings.EqualFold(d, floatInf):
+ value.Set(reflect.ValueOf(aws.Float64(math.Inf(1))))
+ case strings.EqualFold(d, floatNegInf):
+ value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1))))
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", d)
+ }
default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
index c0c52e2db..9c1ccde54 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -13,17 +13,46 @@ import (
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)
+const (
+ awsQueryError = "x-amzn-query-error"
+ // A valid header example - "x-amzn-query-error": ";"
+ awsQueryErrorPartsCount = 2
+)
+
// UnmarshalTypedError provides unmarshaling errors API response errors
// for both typed and untyped errors.
type UnmarshalTypedError struct {
- exceptions map[string]func(protocol.ResponseMetadata) error
+ exceptions map[string]func(protocol.ResponseMetadata) error
+ queryExceptions map[string]func(protocol.ResponseMetadata, string) error
}
// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
// set of exception names to the error unmarshalers
func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
return &UnmarshalTypedError{
- exceptions: exceptions,
+ exceptions: exceptions,
+ queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
+ }
+}
+
+// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError
+// before returning it
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+ unmarshaledError := NewUnmarshalTypedError(exceptions)
+ for _, fn := range optFns {
+ fn(unmarshaledError)
+ }
+ return unmarshaledError
+}
+
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
+func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
+ return func(typedError *UnmarshalTypedError) {
+ typedError.queryExceptions = queryExceptions
}
}
@@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError(
code := codeParts[len(codeParts)-1]
msg := jsonErr.Message
+ queryCodeParts := queryCodeParts(resp, u)
+
if fn, ok := u.exceptions[code]; ok {
- // If exception code is know, use associated constructor to get a value
+ // If query-compatible exceptions are found and query-error-header is found,
+ // then use associated constructor to get exception with query error code.
+ //
+ // If exception code is known, use associated constructor to get a value
// for the exception that the JSON body can be unmarshaled into.
- v := fn(respMeta)
+ var v error
+ queryErrFn, queryExceptionsFound := u.queryExceptions[code]
+ if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound {
+ v = queryErrFn(respMeta, queryCodeParts[0])
+ } else {
+ v = fn(respMeta)
+ }
err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
if err != nil {
return nil, err
}
-
return v, nil
}
+ if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 {
+ code = queryCodeParts[0]
+ }
+
// fallback to unmodeled generic exceptions
return awserr.NewRequestFailure(
awserr.New(code, msg, nil),
@@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError(
), nil
}
+// A valid header example - "x-amzn-query-error": ";"
+func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string {
+ queryCodeHeader := resp.Header.Get(awsQueryError)
+ var queryCodeParts []string
+ if queryCodeHeader != "" && len(u.queryExceptions) > 0 {
+ queryCodeParts = strings.Split(queryCodeHeader, ";")
+ }
+ return queryCodeParts
+}
+
// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
// protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 75866d012..2ca0b19db 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -3,6 +3,7 @@ package queryutil
import (
"encoding/base64"
"fmt"
+ "math"
"net/url"
"reflect"
"sort"
@@ -13,6 +14,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
+const (
+ floatNaN = "NaN"
+ floatInf = "Infinity"
+ floatNegInf = "-Infinity"
+)
+
// Parse parses an object i and fills a url.Values object. The isEC2 flag
// indicates if this is the EC2 Query sub-protocol.
func Parse(body url.Values, i interface{}, isEC2 bool) error {
@@ -115,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- // If it's empty, generate an empty value
- if !value.IsNil() && value.Len() == 0 {
+ // If it's empty, and not ec2, generate an empty value
+ if !value.IsNil() && value.Len() == 0 && !q.isEC2 {
v.Set(prefix, "")
return nil
}
@@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
case int:
v.Set(name, strconv.Itoa(value))
case float64:
- v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ var str string
+ switch {
+ case math.IsNaN(value):
+ str = floatNaN
+ case math.IsInf(value, 1):
+ str = floatInf
+ case math.IsInf(value, -1):
+ str = floatNegInf
+ default:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ }
+ v.Set(name, str)
case float32:
- v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ asFloat64 := float64(value)
+ var str string
+ switch {
+ case math.IsNaN(asFloat64):
+ str = floatNaN
+ case math.IsInf(asFloat64, 1):
+ str = floatInf
+ case math.IsInf(asFloat64, -1):
+ str = floatNegInf
+ default:
+ str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
+ }
+ v.Set(name, str)
case time.Time:
const ISO8601UTC = "2006-01-02T15:04:05Z"
format := tag.Get("timestampFormat")
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index 63f66af2c..ecc521f88 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -6,6 +6,7 @@ import (
"encoding/base64"
"fmt"
"io"
+ "math"
"net/http"
"net/url"
"path"
@@ -20,6 +21,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
+const (
+ floatNaN = "NaN"
+ floatInf = "Infinity"
+ floatNegInf = "-Infinity"
+)
+
// Whether the byte value can be sent without escaping in AWS URLs
var noEscape [256]bool
@@ -280,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
if tag.Get("location") != "header" || tag.Get("enum") == "" {
return "", fmt.Errorf("%T is only supported with location header and enum shapes", value)
}
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+
buff := &bytes.Buffer{}
for i, sv := range value {
if sv == nil || len(*sv) == 0 {
@@ -302,7 +313,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
case int64:
str = strconv.FormatInt(value, 10)
case float64:
- str = strconv.FormatFloat(value, 'f', -1, 64)
+ switch {
+ case math.IsNaN(value):
+ str = floatNaN
+ case math.IsInf(value, 1):
+ str = floatInf
+ case math.IsInf(value, -1):
+ str = floatNegInf
+ default:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ }
case time.Time:
format := tag.Get("timestampFormat")
if len(format) == 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index cdef403e2..79fcf1699 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
"net/http"
"reflect"
"strconv"
@@ -231,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
}
v.Set(reflect.ValueOf(&i))
case *float64:
- f, err := strconv.ParseFloat(header, 64)
- if err != nil {
- return err
+ var f float64
+ switch {
+ case strings.EqualFold(header, floatNaN):
+ f = math.NaN()
+ case strings.EqualFold(header, floatInf):
+ f = math.Inf(1)
+ case strings.EqualFold(header, floatNegInf):
+ f = math.Inf(-1)
+ default:
+ var err error
+ f, err = strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
}
v.Set(reflect.ValueOf(&f))
case *time.Time:
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
index d756d8cc5..5366a646d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -2,6 +2,7 @@ package restjson
import (
"bytes"
+ "encoding/json"
"io"
"io/ioutil"
"net/http"
@@ -40,52 +41,30 @@ func (u *UnmarshalTypedError) UnmarshalError(
resp *http.Response,
respMeta protocol.ResponseMetadata,
) (error, error) {
-
- code := resp.Header.Get(errorTypeHeader)
- msg := resp.Header.Get(errorMessageHeader)
-
- body := resp.Body
- if len(code) == 0 {
- // If unable to get code from HTTP headers have to parse JSON message
- // to determine what kind of exception this will be.
- var buf bytes.Buffer
- var jsonErr jsonErrorResponse
- teeReader := io.TeeReader(resp.Body, &buf)
- err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
- if err != nil {
- return nil, err
- }
-
- body = ioutil.NopCloser(&buf)
- code = jsonErr.Code
- msg = jsonErr.Message
+ code, msg, err := unmarshalErrorInfo(resp)
+ if err != nil {
+ return nil, err
}
- // If code has colon separators remove them so can compare against modeled
- // exception names.
- code = strings.SplitN(code, ":", 2)[0]
-
- if fn, ok := u.exceptions[code]; ok {
- // If exception code is know, use associated constructor to get a value
- // for the exception that the JSON body can be unmarshaled into.
- v := fn(respMeta)
- if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
- return nil, err
- }
+ fn, ok := u.exceptions[code]
+ if !ok {
+ return awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ ), nil
+ }
- if err := rest.UnmarshalResponse(resp, v, true); err != nil {
- return nil, err
- }
+ v := fn(respMeta)
+ if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil {
+ return nil, err
+ }
- return v, nil
+ if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+ return nil, err
}
- // fallback to unmodeled generic exceptions
- return awserr.NewRequestFailure(
- awserr.New(code, msg, nil),
- respMeta.StatusCode,
- respMeta.RequestID,
- ), nil
+ return v, nil
}
// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
@@ -99,36 +78,80 @@ var UnmarshalErrorHandler = request.NamedHandler{
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
- var jsonErr jsonErrorResponse
- err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body)
+ code, msg, err := unmarshalErrorInfo(r.HTTPResponse)
if err != nil {
r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization,
- "failed to unmarshal response error", err),
+ awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
- code := r.HTTPResponse.Header.Get(errorTypeHeader)
- if code == "" {
- code = jsonErr.Code
- }
- msg := r.HTTPResponse.Header.Get(errorMessageHeader)
- if msg == "" {
- msg = jsonErr.Message
- }
-
- code = strings.SplitN(code, ":", 2)[0]
r.Error = awserr.NewRequestFailure(
- awserr.New(code, jsonErr.Message, nil),
+ awserr.New(code, msg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
type jsonErrorResponse struct {
+ Type string `json:"__type"`
Code string `json:"code"`
Message string `json:"message"`
}
+
+func (j *jsonErrorResponse) SanitizedCode() string {
+ code := j.Code
+ if len(j.Type) > 0 {
+ code = j.Type
+ }
+ return sanitizeCode(code)
+}
+
+// Remove superfluous components from a restJson error code.
+// - If a : character is present, then take only the contents before the
+// first : character in the value.
+// - If a # character is present, then take only the contents after the first
+// # character in the value.
+//
+// All of the following error values resolve to FooError:
+// - FooError
+// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/
+// - aws.protocoltests.restjson#FooError
+// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/
+func sanitizeCode(code string) string {
+ noColon := strings.SplitN(code, ":", 2)[0]
+ hashSplit := strings.SplitN(noColon, "#", 2)
+ return hashSplit[len(hashSplit)-1]
+}
+
+// attempt to garner error details from the response, preferring header values
+// when present
+func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) {
+ code = sanitizeCode(resp.Header.Get(errorTypeHeader))
+ msg = resp.Header.Get(errorMessageHeader)
+ if len(code) > 0 && len(msg) > 0 {
+ return
+ }
+
+ // a modeled error will have to be re-deserialized later, so the body must
+ // be preserved
+ var buf bytes.Buffer
+ tee := io.TeeReader(resp.Body, &buf)
+ defer func() { resp.Body = ioutil.NopCloser(&buf) }()
+
+ var jsonErr jsonErrorResponse
+ if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF {
+ err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes())
+ return
+ }
+
+ if len(code) == 0 {
+ code = jsonErr.SanitizedCode()
+ }
+ if len(msg) == 0 {
+ msg = jsonErr.Message
+ }
+ return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index 2fbb93ae7..58c12bd8c 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -5,6 +5,7 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
+ "math"
"reflect"
"sort"
"strconv"
@@ -14,6 +15,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
+const (
+ floatNaN = "NaN"
+ floatInf = "Infinity"
+ floatNegInf = "-Infinity"
+)
+
// BuildXML will serialize params into an xml.Encoder. Error will be returned
// if the serialization of any of the params or nested values fails.
func BuildXML(params interface{}, e *xml.Encoder) error {
@@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect
// Error will be returned if the value type is unsupported.
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
var str string
+
switch converted := value.Interface().(type) {
case string:
str = converted
@@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
case int:
str = strconv.Itoa(converted)
case float64:
- str = strconv.FormatFloat(converted, 'f', -1, 64)
+ switch {
+ case math.IsNaN(converted):
+ str = floatNaN
+ case math.IsInf(converted, 1):
+ str = floatInf
+ case math.IsInf(converted, -1):
+ str = floatNegInf
+ default:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ }
case float32:
- str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently.
+ asFloat64 := float64(converted)
+ switch {
+ case math.IsNaN(asFloat64):
+ str = floatNaN
+ case math.IsInf(asFloat64, 1):
+ str = floatInf
+ case math.IsInf(asFloat64, -1):
+ str = floatNegInf
+ default:
+ str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
+ }
case time.Time:
format := tag.Get("timestampFormat")
if len(format) == 0 {
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index 107c053f8..44a580a94 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -6,6 +6,7 @@ import (
"encoding/xml"
"fmt"
"io"
+ "math"
"reflect"
"strconv"
"strings"
@@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
}
r.Set(reflect.ValueOf(&v))
case *float64:
- v, err := strconv.ParseFloat(node.Text, 64)
- if err != nil {
- return err
+ var v float64
+ switch {
+ case strings.EqualFold(node.Text, floatNaN):
+ v = math.NaN()
+ case strings.EqualFold(node.Text, floatInf):
+ v = math.Inf(1)
+ case strings.EqualFold(node.Text, floatNegInf):
+ v = math.Inf(-1)
+ default:
+ var err error
+ v, err = strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
}
r.Set(reflect.ValueOf(&v))
case *time.Time:
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
index 2ac8aa1be..30d4752cd 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go
@@ -75,6 +75,9 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ
//
// Related operations: ScheduleKeyDeletion
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -93,8 +96,8 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -104,10 +107,18 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion
func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) {
req, out := c.CancelKeyDeletionRequest(input)
@@ -175,32 +186,26 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// ConnectCustomKeyStore API operation for AWS Key Management Service.
//
// Connects or reconnects a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// to its associated CloudHSM cluster.
+// to its backing key store. For an CloudHSM key store, ConnectCustomKeyStore
+// connects the key store to its associated CloudHSM cluster. For an external
+// key store, ConnectCustomKeyStore connects the key store to the external key
+// store proxy that communicates with your external key manager.
//
// The custom key store must be connected before you can create KMS keys in
// the key store or use the KMS keys it contains. You can disconnect and reconnect
// a custom key store at any time.
//
-// To connect a custom key store, its associated CloudHSM cluster must have
-// at least one active HSM. To get the number of active HSMs in a cluster, use
-// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
-// operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
-// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
-// (CU) must not be logged into the cluster. This prevents KMS from using this
-// account to log in.
-//
-// The connection process can take an extended amount of time to complete; up
-// to 20 minutes. This operation starts the connection process, but it does
-// not wait for it to complete. When it succeeds, this operation quickly returns
-// an HTTP 200 response and a JSON object with no properties. However, this
-// response does not indicate that the custom key store is connected. To get
-// the connection state of the custom key store, use the DescribeCustomKeyStores
+// The connection process for a custom key store can take an extended amount
+// of time to complete. This operation starts the connection process, but it
+// does not wait for it to complete. When it succeeds, this operation quickly
+// returns an HTTP 200 response and a JSON object with no properties. However,
+// this response does not indicate that the custom key store is connected. To
+// get the connection state of the custom key store, use the DescribeCustomKeyStores
// operation.
//
-// During the connection process, KMS finds the CloudHSM cluster that is associated
-// with the custom key store, creates the connection infrastructure, connects
-// to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates
-// its password.
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
//
// The ConnectCustomKeyStore operation might fail for various reasons. To find
// the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode
@@ -210,8 +215,44 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// the custom key store, correct the error, use the UpdateCustomKeyStore operation
// if necessary, and then use ConnectCustomKeyStore again.
//
-// If you are having trouble connecting or disconnecting a custom key store,
-// see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// # CloudHSM key store
+//
+// During the connection process for an CloudHSM key store, KMS finds the CloudHSM
+// cluster that is associated with the custom key store, creates the connection
+// infrastructure, connects to the cluster, logs into the CloudHSM client as
+// the kmsuser CU, and rotates its password.
+//
+// To connect an CloudHSM key store, its associated CloudHSM cluster must have
+// at least one active HSM. To get the number of active HSMs in a cluster, use
+// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
+// (CU) must not be logged into the cluster. This prevents KMS from using this
+// account to log in.
+//
+// If you are having trouble connecting or disconnecting a CloudHSM key store,
+// see Troubleshooting an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// in the Key Management Service Developer Guide.
+//
+// # External key store
+//
+// When you connect an external key store that uses public endpoint connectivity,
+// KMS tests its ability to communicate with your external key manager by sending
+// a request via the external key store proxy.
+//
+// When you connect to an external key store that uses VPC endpoint service
+// connectivity, KMS establishes the networking elements that it needs to communicate
+// with your external key manager via the external key store proxy. This includes
+// creating an interface endpoint to the VPC endpoint service and a private
+// hosted zone for traffic between KMS and the VPC endpoint service.
+//
+// To connect an external key store, KMS must be able to connect to the external
+// key store proxy, the external key store proxy must be able to communicate
+// with your external key manager, and the external key manager must be available
+// for cryptographic operations.
+//
+// If you are having trouble connecting or disconnecting an external key store,
+// see Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html)
// in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
@@ -232,6 +273,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
//
// - UpdateCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -242,10 +286,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
// Returned Error Types:
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - CustomKeyStoreInvalidStateException
@@ -255,17 +298,27 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -277,29 +330,29 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -375,7 +428,7 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// Creates a friendly name for a KMS key.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// You can use an alias to identify a KMS key in the KMS console, in the DescribeKey
@@ -423,6 +476,9 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
//
// - UpdateAlias
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -433,8 +489,8 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - AlreadyExistsException
// The request was rejected because it attempted to create a resource that already
@@ -460,10 +516,18 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias
func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) {
req, out := c.CreateAliasRequest(input)
@@ -530,27 +594,65 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
// CreateCustomKeyStore API operation for AWS Key Management Service.
//
// Creates a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// that is associated with an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html)
-// that you own and manage.
-//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// backed by a key store that you own and manage. When you use a KMS key in
+// a custom key store for a cryptographic operation, the cryptographic operation
+// is actually performed in your key store using your keys. KMS supports CloudHSM
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// backed by an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html)
+// and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// backed by an external key store proxy and external key manager outside of
+// Amazon Web Services.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
-// Before you create the custom key store, you must assemble the required elements,
-// including an CloudHSM cluster that fulfills the requirements for a custom
-// key store. For details about the required elements, see Assemble the Prerequisites
-// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// Before you create the custom key store, the required elements must be in
+// place and operational. We recommend that you use the test tools that KMS
+// provides to verify the configuration your external key store proxy. For details
+// about the required elements and verification tests, see Assemble the prerequisites
+// (for CloudHSM key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// or Assemble the prerequisites (for external key stores) (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements)
// in the Key Management Service Developer Guide.
//
+// To create a custom key store, use the following parameters.
+//
+// - To create an CloudHSM key store, specify the CustomKeyStoreName, CloudHsmClusterId,
+// KeyStorePassword, and TrustAnchorCertificate. The CustomKeyStoreType parameter
+// is optional for CloudHSM key stores. If you include it, set it to the
+// default value, AWS_CLOUDHSM. For help with failures, see Troubleshooting
+// an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// in the Key Management Service Developer Guide.
+//
+// - To create an external key store, specify the CustomKeyStoreName and
+// a CustomKeyStoreType of EXTERNAL_KEY_STORE. Also, specify values for XksProxyConnectivity,
+// XksProxyAuthenticationCredential, XksProxyUriEndpoint, and XksProxyUriPath.
+// If your XksProxyConnectivity value is VPC_ENDPOINT_SERVICE, specify the
+// XksProxyVpcEndpointServiceName parameter. For help with failures, see
+// Troubleshooting an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html)
+// in the Key Management Service Developer Guide.
+//
+// For external key stores:
+//
+// Some external key managers provide a simpler method for creating an external
+// key store. For details, see your external key manager documentation.
+//
+// When creating an external key store in the KMS console, you can upload a
+// JSON-based proxy configuration file with the desired values. You cannot use
+// a proxy configuration with the CreateCustomKeyStore operation. However, you
+// can use the values in the file to help you determine the correct values for
+// the CreateCustomKeyStore parameters.
+//
// When the operation completes successfully, it returns the ID of the new custom
// key store. Before you can use your new custom key store, you need to use
-// the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM
-// cluster. Even if you are not going to use your custom key store immediately,
-// you might want to connect it to verify that all settings are correct and
-// then disconnect it until you are ready to use it.
-//
-// For help with failures, see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+// the ConnectCustomKeyStore operation to connect a new CloudHSM key store to
+// its CloudHSM cluster, or to connect a new external key store to the external
+// key store proxy for your external key manager. Even if you are not going
+// to use your custom key store immediately, you might want to connect it to
+// verify that all settings are correct and then disconnect it until you are
+// ready to use it.
+//
+// For help with failures, see Troubleshooting a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
// in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
@@ -571,6 +673,9 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
//
// - UpdateCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -582,12 +687,13 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
//
// - CloudHsmClusterInUseException
// The request was rejected because the specified CloudHSM cluster is already
-// associated with a custom key store or it shares a backup history with a cluster
-// that is associated with a custom key store. Each custom key store must be
-// associated with a different CloudHSM cluster.
+// associated with an CloudHSM key store in the account, or it shares a backup
+// history with an CloudHSM key store in the account. Each CloudHSM key store
+// in the account must be associated with a different CloudHSM cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
//
// - CustomKeyStoreNameInUseException
@@ -604,51 +710,114 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req
// can be retried.
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - IncorrectTrustAnchorException
// The request was rejected because the trust anchor certificate in the request
-// is not the trust anchor certificate for the specified CloudHSM cluster.
+// to create an CloudHSM key store is not the trust anchor certificate for the
+// specified CloudHSM cluster.
//
-// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+// When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - LimitExceededException
+// The request was rejected because a quota was exceeded. For more information,
+// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
+// in the Key Management Service Developer Guide.
+//
+// - XksProxyUriInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with another external key store
+// in this Amazon Web Services Region. Each external key store in a Region must
+// use a unique external key store proxy API address.
+//
+// - XksProxyUriEndpointInUseException
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with another external key store in this Amazon Web Services Region. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - XksProxyUriUnreachableException
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+//
+// - XksProxyIncorrectAuthenticationCredentialException
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+//
+// - XksProxyVpcEndpointServiceInUseException
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with another external key store in this Amazon Web
+// Services Region. Each external key store in a Region must use a different
+// Amazon VPC endpoint service.
+//
+// - XksProxyVpcEndpointServiceNotFoundException
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+//
+// - XksProxyVpcEndpointServiceInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store. To identify
+// the cause, see the error message that accompanies the exception and review
+// the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+//
+// - XksProxyInvalidResponseException
+//
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+//
+// - XksProxyInvalidConfigurationException
+// The request was rejected because the external key store proxy is not configured
+// correctly. To identify the cause, see the error message that accompanies
+// the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore
func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) {
req, out := c.CreateCustomKeyStoreRequest(input)
@@ -766,6 +935,9 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request,
//
// - RevokeGrant
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -783,8 +955,8 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request,
// The request was rejected because the specified KMS key is not enabled.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -806,10 +978,21 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant
func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) {
req, out := c.CreateGrantRequest(input)
@@ -876,13 +1059,21 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// CreateKey API operation for AWS Key Management Service.
//
// Creates a unique customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys)
-// in your Amazon Web Services account and Region.
+// in your Amazon Web Services account and Region. You can use a KMS key in
+// cryptographic operations, such as encryption and signing. Some Amazon Web
+// Services services let you use KMS keys that you create and manage to protect
+// your service resources.
//
-// In addition to the required parameters, you can use the optional parameters
-// to specify a key policy, description, tags, and other useful elements for
-// any key type.
+// A KMS key is a logical representation of a cryptographic key. In addition
+// to the key material used in cryptographic operations, a KMS key includes
+// metadata, such as the key ID, key policy, creation date, description, and
+// key state. For details, see Managing keys (https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html)
+// in the Key Management Service Developer Guide
//
-// KMS is replacing the term customer master key (CMK) with KMS key and KMS
+// Use the parameters of CreateKey to specify the type of KMS key, the source
+// of its key material, its key policy, description, tags, and other properties.
+//
+// KMS has replaced the term customer master key (CMK) with KMS key and KMS
// key. The concept has not changed. To prevent breaking changes, KMS is keeping
// some variations of this term.
//
@@ -890,11 +1081,14 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
//
// # Symmetric encryption KMS key
//
-// To create a symmetric encryption KMS key, you aren't required to specify
-// any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the
-// default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption
-// KMS key. For technical details, see SYMMETRIC_DEFAULT key spec (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-symmetric-default)
-// in the Key Management Service Developer Guide.
+// By default, CreateKey creates a symmetric encryption KMS key with key material
+// that KMS generates. This is the basic and most widely used type of KMS key,
+// and provides the best performance.
+//
+// To create a symmetric encryption KMS key, you don't need to specify any parameters.
+// The default value for KeySpec, SYMMETRIC_DEFAULT, the default value for KeyUsage,
+// ENCRYPT_DECRYPT, and the default value for Origin, AWS_KMS, create a symmetric
+// encryption KMS key with KMS key material.
//
// If you need a key for basic encryption and decryption or you are creating
// a KMS key to protect your resources in an Amazon Web Services service, create
@@ -914,11 +1108,15 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key pair,
// or an SM2 key pair (China Regions only). The private key in an asymmetric
// KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey
-// operation to download the public key so it can be used outside of KMS. KMS
-// keys with RSA or SM2 key pairs can be used to encrypt or decrypt data or
-// sign and verify messages (but not both). KMS keys with ECC key pairs can
-// be used only to sign and verify messages. For information about asymmetric
-// KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// operation to download the public key so it can be used outside of KMS. Each
+// KMS key can have only one key usage. KMS keys with RSA key pairs can be used
+// to encrypt and decrypt data or sign and verify messages (but not both). KMS
+// keys with NIST-recommended ECC key pairs can be used to sign and verify messages
+// or derive shared secrets (but not both). KMS keys with ECC_SECG_P256K1 can
+// be used only to sign and verify messages. KMS keys with SM2 key pairs (China
+// Regions only) can be used to either encrypt and decrypt data, sign and verify
+// messages, or derive shared secrets (you must choose one key usage type).
+// For information about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Key Management Service Developer Guide.
//
// # HMAC KMS key
@@ -933,13 +1131,6 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes
// for messages up to 4096 bytes.
//
-// HMAC KMS keys are not supported in all Amazon Web Services Regions. If you
-// try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC
-// keys are not supported, the CreateKey operation returns an UnsupportedOperationException.
-// For a list of Regions in which HMAC KMS keys are supported, see HMAC keys
-// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
-// in the Key Management Service Developer Guide.
-//
// # Multi-Region primary keys
//
// # Imported key material
@@ -965,37 +1156,70 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
// in the Key Management Service Developer Guide.
//
-// To import your own key material, begin by creating a symmetric encryption
-// KMS key with no key material. To do this, use the Origin parameter of CreateKey
-// with a value of EXTERNAL. Next, use GetParametersForImport operation to get
-// a public key and import token, and use the public key to encrypt your key
-// material. Then, use ImportKeyMaterial with your import token to import the
-// key material. For step-by-step instructions, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+// To import your own key material into a KMS key, begin by creating a KMS key
+// with no key material. To do this, use the Origin parameter of CreateKey with
+// a value of EXTERNAL. Next, use GetParametersForImport operation to get a
+// public key and import token. Use the wrapping public key to encrypt your
+// key material. Then, use ImportKeyMaterial with your import token to import
+// the key material. For step-by-step instructions, see Importing Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
// in the Key Management Service Developer Guide .
//
-// This feature supports only symmetric encryption KMS keys, including multi-Region
-// symmetric encryption KMS keys. You cannot import key material into any other
-// type of KMS key.
+// You can import key material into KMS keys of all supported KMS key types:
+// symmetric encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys,
+// and asymmetric signing KMS keys. You can also create multi-Region keys with
+// imported key material. However, you can't import key material into a KMS
+// key in a custom key store.
//
// To create a multi-Region primary key with imported key material, use the
// Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion
// parameter with a value of True. To create replicas of the multi-Region primary
-// key, use the ReplicateKey operation. For more information about multi-Region
-// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// key, use the ReplicateKey operation. For instructions, see Importing key
+// material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html).
+// For more information about multi-Region keys, see Multi-Region keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
// in the Key Management Service Developer Guide.
//
// # Custom key store
//
-// To create a symmetric encryption KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// use the CustomKeyStoreId parameter to specify the custom key store. You must
-// also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM
-// cluster that is associated with the custom key store must have at least two
-// active HSMs in different Availability Zones in the Amazon Web Services Region.
+// A custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// lets you protect your Amazon Web Services resources using keys in a backing
+// key store that you own and manage. When you request a cryptographic operation
+// with a KMS key in a custom key store, the operation is performed in the backing
+// key store using its cryptographic keys.
+//
+// KMS supports CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html)
+// backed by an CloudHSM cluster and external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html)
+// backed by an external key manager outside of Amazon Web Services. When you
+// create a KMS key in an CloudHSM key store, KMS generates an encryption key
+// in the CloudHSM cluster and associates it with the KMS key. When you create
+// a KMS key in an external key store, you specify an existing encryption key
+// in the external key manager.
+//
+// Some external key managers provide a simpler method for creating a KMS key
+// in an external key store. For details, see your external key manager documentation.
+//
+// Before you create a KMS key in a custom key store, the ConnectionState of
+// the key store must be CONNECTED. To connect the custom key store, use the
+// ConnectCustomKeyStore operation. To find the ConnectionState, use the DescribeCustomKeyStores
+// operation.
//
-// Custom key stores support only symmetric encryption KMS keys. You cannot
-// create an HMAC KMS key or an asymmetric KMS key in a custom key store. For
-// information about custom key stores in KMS see Custom key stores in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// in the Key Management Service Developer Guide .
+// To create a KMS key in a custom key store, use the CustomKeyStoreId. Use
+// the default KeySpec value, SYMMETRIC_DEFAULT, and the default KeyUsage value,
+// ENCRYPT_DECRYPT to create a symmetric encryption key. No other key type is
+// supported in a custom key store.
+//
+// To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html),
+// use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster
+// that is associated with the custom key store must have at least two active
+// HSMs in different Availability Zones in the Amazon Web Services Region.
+//
+// To create a KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html),
+// use the Origin parameter with a value of EXTERNAL_KEY_STORE and an XksKeyId
+// parameter that identifies an existing external key.
+//
+// Some external key managers provide a simpler method for creating a KMS key
+// in an external key store. For details, see your external key manager documentation.
//
// Cross-account use: No. You cannot use this operation to create a KMS key
// in a different Amazon Web Services account.
@@ -1014,6 +1238,9 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
//
// - ScheduleKeyDeletion
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1028,8 +1255,8 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
// or semantically correct.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -1062,49 +1289,83 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - XksKeyInvalidConfigurationException
+// The request was rejected because the external key specified by the XksKeyId
+// parameter did not meet the configuration requirements for an external key
+// store.
+//
+// The external key must be an AES-256 symmetric key that is enabled and performs
+// encryption and decryption.
+//
+// - XksKeyAlreadyInUseException
+// The request was rejected because the (XksKeyId) is already associated with
+// another KMS key in this external key store. Each KMS key in an external key
+// store must be associated with a different external key.
+//
+// - XksKeyNotFoundException
+// The request was rejected because the external key store proxy could not find
+// the external key. This exception is thrown when the value of the XksKeyId
+// parameter doesn't identify a key in the external key manager associated with
+// the external key proxy.
+//
+// Verify that the XksKeyId represents an existing key in the external key manager.
+// Use the key identifier that the external key store proxy uses to identify
+// the key. For details, see the documentation provided with your external key
+// store proxy or key manager.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey
func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) {
req, out := c.CreateKeyRequest(input)
@@ -1192,8 +1453,8 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
//
// The Decrypt operation also decrypts ciphertext that was encrypted outside
// of KMS by the public key in an KMS asymmetric KMS key. However, it cannot
-// decrypt ciphertext produced by other libraries, such as the Amazon Web Services
-// Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
+// decrypt symmetric ciphertext produced by other libraries, such as the Amazon
+// Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
// These libraries return a ciphertext format that is incompatible with KMS.
//
@@ -1209,28 +1470,33 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// KMS key that you intend.
//
// Whenever possible, use key policies to give users permission to call the
-// Decrypt operation on a particular KMS key, instead of using IAM policies.
-// Otherwise, you might create an IAM user policy that gives the user Decrypt
-// permission on all KMS keys. This user could decrypt ciphertext that was encrypted
-// by KMS keys in other accounts if the key policy for the cross-account KMS
-// key permits it. If you must use an IAM policy for Decrypt permissions, limit
+// Decrypt operation on a particular KMS key, instead of using &IAM; policies.
+// Otherwise, you might create an &IAM; policy that gives the user Decrypt permission
+// on all KMS keys. This user could decrypt ciphertext that was encrypted by
+// KMS keys in other accounts if the key policy for the cross-account KMS key
+// permits it. If you must use an IAM policy for Decrypt permissions, limit
// the user to particular KMS keys or particular trusted accounts. For details,
// see Best practices for IAM policies (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices)
// in the Key Management Service Developer Guide.
//
-// Applications in Amazon Web Services Nitro Enclaves can call this operation
-// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c).
-// For information about the supporting parameters, see How Amazon Web Services
-// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// Decrypt also supports Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html),
+// which provide an isolated compute environment in Amazon EC2. To call Decrypt
+// for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+// or any Amazon Web Services SDK. Use the Recipient parameter to provide the
+// attestation document for the enclave. Instead of the plaintext data, the
+// response includes the plaintext data encrypted with the public key from the
+// attestation document (CiphertextForRecipient). For information about the
+// interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon
+// Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
// in the Key Management Service Developer Guide.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
//
-// Cross-account use: Yes. To perform this operation with a KMS key in a different
-// Amazon Web Services account, specify the key ARN or alias ARN in the value
-// of the KeyId parameter.
+// Cross-account use: Yes. If you use the KeyId parameter to identify a KMS
+// key in a different Amazon Web Services account, specify the key ARN or the
+// alias ARN of the KMS key.
//
// Required permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy)
@@ -1245,6 +1511,9 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
//
// - ReEncrypt
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1290,15 +1559,16 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
// key, use the DescribeKey operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidGrantTokenException
// The request was rejected because the specified grant token is not valid.
@@ -1311,10 +1581,21 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt
func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) {
req, out := c.DecryptRequest(input)
@@ -1384,7 +1665,7 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// Deletes the specified alias.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// Because an alias is not a property of a KMS key, you can delete and change
@@ -1418,6 +1699,9 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
//
// - UpdateAlias
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1428,8 +1712,8 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -1443,10 +1727,18 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias
func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) {
req, out := c.DeleteAliasRequest(input)
@@ -1514,33 +1806,39 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req
// DeleteCustomKeyStore API operation for AWS Key Management Service.
//
// Deletes a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// This operation does not delete the CloudHSM cluster that is associated with
-// the custom key store, or affect any users or keys in the cluster.
+// This operation does not affect any backing elements of the custom key store.
+// It does not delete the CloudHSM cluster that is associated with an CloudHSM
+// key store, or affect any users or keys in the cluster. For an external key
+// store, it does not affect the external key store proxy, external key manager,
+// or any external keys.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
//
// The custom key store that you delete cannot contain any KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys).
// Before deleting the key store, verify that you will never need to use any
// of the KMS keys in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations).
// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store.
-// When the scheduled waiting period expires, the ScheduleKeyDeletion operation
-// deletes the KMS keys. Then it makes a best effort to delete the key material
-// from the associated cluster. However, you might need to manually delete the
-// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups.
-//
-// After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to
-// disconnect the key store from KMS. Then, you can delete the custom key store.
-//
-// Instead of deleting the custom key store, consider using DisconnectCustomKeyStore
-// to disconnect it from KMS. While the key store is disconnected, you cannot
-// create or use the KMS keys in the key store. But, you do not need to delete
-// KMS keys and you can reconnect a disconnected custom key store at any time.
+// After the required waiting period expires and all KMS keys are deleted from
+// the custom key store, use DisconnectCustomKeyStore to disconnect the key
+// store from KMS. Then, you can delete the custom key store.
+//
+// For keys in an CloudHSM key store, the ScheduleKeyDeletion operation makes
+// a best effort to delete the key material from the associated cluster. However,
+// you might need to manually delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. KMS never creates, manages, or deletes
+// cryptographic keys in the external key manager associated with an external
+// key store. You must manage them using your external key manager tools.
+//
+// Instead of deleting the custom key store, consider using the DisconnectCustomKeyStore
+// operation to disconnect the custom key store from its backing key store.
+// While the key store is disconnected, you cannot create or use the KMS keys
+// in the key store. But, you do not need to delete KMS keys and you can reconnect
+// a disconnected custom key store at any time.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
@@ -1559,6 +1857,9 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req
//
// - UpdateCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1581,17 +1882,27 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -1667,18 +1978,16 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
// DeleteImportedKeyMaterial API operation for AWS Key Management Service.
//
-// Deletes key material that you previously imported. This operation makes the
-// specified KMS key unusable. For more information about importing key material
-// into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+// Deletes key material that was previously imported. This operation makes the
+// specified KMS key temporarily unusable. To restore the usability of the KMS
+// key, reimport the same key material. For more information about importing
+// key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
// in the Key Management Service Developer Guide.
//
// When the specified KMS key is in the PendingDeletion state, this operation
// does not change the KMS key's state. Otherwise, it changes the KMS key's
// state to PendingImport.
//
-// After you delete key material, you can use ImportKeyMaterial to reimport
-// the same key material into the KMS key.
-//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
@@ -1695,6 +2004,9 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
//
// - ImportKeyMaterial
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1713,8 +2025,8 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -1728,10 +2040,18 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial
func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) {
req, out := c.DeleteImportedKeyMaterialRequest(input)
@@ -1754,6 +2074,219 @@ func (c *KMS) DeleteImportedKeyMaterialWithContext(ctx aws.Context, input *Delet
return out, req.Send()
}
+const opDeriveSharedSecret = "DeriveSharedSecret"
+
+// DeriveSharedSecretRequest generates a "aws/request.Request" representing the
+// client's request for the DeriveSharedSecret operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeriveSharedSecret for more information on using the DeriveSharedSecret
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeriveSharedSecretRequest method.
+// req, resp := client.DeriveSharedSecretRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeriveSharedSecret
+func (c *KMS) DeriveSharedSecretRequest(input *DeriveSharedSecretInput) (req *request.Request, output *DeriveSharedSecretOutput) {
+ op := &request.Operation{
+ Name: opDeriveSharedSecret,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeriveSharedSecretInput{}
+ }
+
+ output = &DeriveSharedSecretOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeriveSharedSecret API operation for AWS Key Management Service.
+//
+// Derives a shared secret using a key agreement algorithm.
+//
+// You must use an asymmetric NIST-recommended elliptic curve (ECC) or SM2 (China
+// Regions only) KMS key pair with a KeyUsage value of KEY_AGREEMENT to call
+// DeriveSharedSecret.
+//
+// DeriveSharedSecret uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman
+// Primitive (https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar3.pdf#page=60)
+// (ECDH) to establish a key agreement between two peers by deriving a shared
+// secret from their elliptic curve public-private key pairs. You can use the
+// raw shared secret that DeriveSharedSecret returns to derive a symmetric key
+// that can encrypt and decrypt data that is sent between the two peers, or
+// that can generate and verify HMACs. KMS recommends that you follow NIST recommendations
+// for key derivation (https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Cr2.pdf)
+// when using the raw shared secret to derive a symmetric key.
+//
+// The following workflow demonstrates how to establish key agreement over an
+// insecure communication channel using DeriveSharedSecret.
+//
+// Alice calls CreateKey to create an asymmetric KMS key pair with a KeyUsage
+// value of KEY_AGREEMENT.
+//
+// The asymmetric KMS key must use a NIST-recommended elliptic curve (ECC) or
+// SM2 (China Regions only) key spec.
+//
+// Bob creates an elliptic curve key pair.
+//
+// Bob can call CreateKey to create an asymmetric KMS key pair or generate a
+// key pair outside of KMS. Bob's key pair must use the same NIST-recommended
+// elliptic curve (ECC) or SM2 (China Regions ony) curve as Alice.
+//
+// Alice and Bob exchange their public keys through an insecure communication
+// channel (like the internet).
+//
+// Use GetPublicKey to download the public key of your asymmetric KMS key pair.
+//
+// KMS strongly recommends verifying that the public key you receive came from
+// the expected party before using it to derive a shared secret.
+//
+// Alice calls DeriveSharedSecret.
+//
+// KMS uses the private key from the KMS key pair generated in Step 1, Bob's
+// public key, and the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive
+// to derive the shared secret. The private key in your KMS key pair never leaves
+// KMS unencrypted. DeriveSharedSecret returns the raw shared secret.
+//
+// Bob uses the Elliptic Curve Cryptography Cofactor Diffie-Hellman Primitive
+// to calculate the same raw secret using his private key and Alice's public
+// key.
+//
+// To derive a shared secret you must provide a key agreement algorithm, the
+// private key of the caller's asymmetric NIST-recommended elliptic curve or
+// SM2 (China Regions only) KMS key pair, and the public key from your peer's
+// NIST-recommended elliptic curve or SM2 (China Regions only) key pair. The
+// public key can be from another asymmetric KMS key pair or from a key pair
+// generated outside of KMS, but both key pairs must be on the same elliptic
+// curve.
+//
+// The KMS key that you use for this operation must be in a compatible key state.
+// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide.
+//
+// Cross-account use: Yes. To perform this operation with a KMS key in a different
+// Amazon Web Services account, specify the key ARN or alias ARN in the value
+// of the KeyId parameter.
+//
+// Required permissions: kms:DeriveSharedSecret (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy)
+//
+// Related operations:
+//
+// - CreateKey
+//
+// - GetPublicKey
+//
+// - DescribeKey
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Key Management Service's
+// API operation DeriveSharedSecret for usage and error information.
+//
+// Returned Error Types:
+//
+// - NotFoundException
+// The request was rejected because the specified entity or resource could not
+// be found.
+//
+// - DisabledException
+// The request was rejected because the specified KMS key is not enabled.
+//
+// - KeyUnavailableException
+// The request was rejected because the specified KMS key was not available.
+// You can retry the request.
+//
+// - DependencyTimeoutException
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
+//
+// - InvalidGrantTokenException
+// The request was rejected because the specified grant token is not valid.
+//
+// - InvalidKeyUsageException
+// The request was rejected for one of the following reasons:
+//
+// - The KeyUsage value of the KMS key is incompatible with the API operation.
+//
+// - The encryption algorithm or signing algorithm specified for the operation
+// is incompatible with the type of key material in the KMS key (KeySpec).
+//
+// For encrypting, decrypting, re-encrypting, and generating data keys, the
+// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
+// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
+// of a KMS key, use the DescribeKey operation.
+//
+// To find the encryption or signing algorithms supported for a particular KMS
+// key, use the DescribeKey operation.
+//
+// - InternalException
+// The request was rejected because an internal exception occurred. The request
+// can be retried.
+//
+// - InvalidStateException
+// The request was rejected because the state of the specified resource is not
+// valid for this request.
+//
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide .
+//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeriveSharedSecret
+func (c *KMS) DeriveSharedSecret(input *DeriveSharedSecretInput) (*DeriveSharedSecretOutput, error) {
+ req, out := c.DeriveSharedSecretRequest(input)
+ return out, req.Send()
+}
+
+// DeriveSharedSecretWithContext is the same as DeriveSharedSecret with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeriveSharedSecret for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *KMS) DeriveSharedSecretWithContext(ctx aws.Context, input *DeriveSharedSecretInput, opts ...request.Option) (*DeriveSharedSecretOutput, error) {
+ req, out := c.DeriveSharedSecretRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDescribeCustomKeyStores = "DescribeCustomKeyStores"
// DescribeCustomKeyStoresRequest generates a "aws/request.Request" representing the
@@ -1806,30 +2339,37 @@ func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput
// Gets information about custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// in the account and Region.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
+// of KMS with the isolation and control of a key store that you own and manage.
//
// By default, this operation returns information about all custom key stores
// in the account and Region. To get only information about a particular custom
// key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter
// (but not both).
//
-// To determine whether the custom key store is connected to its CloudHSM cluster,
-// use the ConnectionState element in the response. If an attempt to connect
-// the custom key store failed, the ConnectionState value is FAILED and the
-// ConnectionErrorCode element in the response indicates the cause of the failure.
-// For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry.
+// To determine whether the custom key store is connected to its CloudHSM cluster
+// or external key store proxy, use the ConnectionState element in the response.
+// If an attempt to connect the custom key store failed, the ConnectionState
+// value is FAILED and the ConnectionErrorCode element in the response indicates
+// the cause of the failure. For help interpreting the ConnectionErrorCode,
+// see CustomKeyStoresListEntry.
//
// Custom key stores have a DISCONNECTED connection state if the key store has
-// never been connected or you use the DisconnectCustomKeyStore operation to
-// disconnect it. If your custom key store state is CONNECTED but you are having
-// trouble using it, make sure that its associated CloudHSM cluster is active
-// and contains the minimum number of HSMs required for the operation, if any.
-//
-// For help repairing your custom key store, see the Troubleshooting Custom
-// Key Stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
-// topic in the Key Management Service Developer Guide.
+// never been connected or you used the DisconnectCustomKeyStore operation to
+// disconnect it. Otherwise, the connection state is CONNECTED. If your custom
+// key store connection state is CONNECTED but you are having trouble using
+// it, verify that the backing store is active and available. For an CloudHSM
+// key store, verify that the associated CloudHSM cluster is active and contains
+// the minimum number of HSMs required for the operation, if any. For an external
+// key store, verify that the external key store proxy and its associated external
+// key manager are reachable and enabled.
+//
+// For help repairing your CloudHSM key store, see the Troubleshooting CloudHSM
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html).
+// For help repairing your external key store, see the Troubleshooting external
+// key stores (https://docs.aws.amazon.com/kms/latest/developerguide/xks-troubleshooting.html).
+// Both topics are in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
@@ -1849,6 +2389,9 @@ func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput
//
// - UpdateCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1995,10 +2538,14 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request,
// any) of the key material. It includes fields, like KeySpec, that help you
// distinguish different types of KMS keys. It also displays the key usage (encryption,
// signing, or generating and verifying MACs) and the algorithms that the KMS
-// key supports. For KMS keys in custom key stores, it includes information
-// about the custom key store, such as the key store ID and the CloudHSM cluster
-// ID. For multi-Region keys, it displays the primary key and all related replica
-// keys.
+// key supports.
+//
+// For multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html),
+// DescribeKey displays the primary key and all related replica keys. For KMS
+// keys in CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html),
+// it includes information about the key store, such as the key store ID and
+// the CloudHSM cluster ID. For KMS keys in external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html),
+// it includes the custom key store ID and the ID of the external key.
//
// DescribeKey does not return the following information:
//
@@ -2043,6 +2590,9 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request,
//
// - ListRetirableGrants
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2061,8 +2611,8 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request,
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2153,6 +2703,9 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o
//
// Related operations: EnableKey
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2171,8 +2724,8 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2182,10 +2735,18 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey
func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) {
req, out := c.DisableKeyRequest(input)
@@ -2256,12 +2817,12 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// of the specified symmetric encryption KMS key.
//
// Automatic key rotation is supported only on symmetric encryption KMS keys.
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// You can enable (EnableKeyRotation) and disable automatic rotation of the
@@ -2290,6 +2851,13 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
//
// - GetKeyRotationStatus
//
+// - ListKeyRotations
+//
+// - RotateKeyOnDemand
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2311,8 +2879,8 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2322,10 +2890,18 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -2397,10 +2973,18 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
// DisconnectCustomKeyStore API operation for AWS Key Management Service.
//
// Disconnects the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// from its associated CloudHSM cluster. While a custom key store is disconnected,
-// you can manage the custom key store and its KMS keys, but you cannot create
-// or use KMS keys in the custom key store. You can reconnect the custom key
-// store at any time.
+// from its backing key store. This operation disconnects an CloudHSM key store
+// from its associated CloudHSM cluster or disconnects an external key store
+// from the external key store proxy that communicates with your external key
+// manager.
+//
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// While a custom key store is disconnected, you can manage the custom key store
+// and its KMS keys, but you cannot create or use its KMS keys. You can reconnect
+// the custom key store at any time.
//
// While a custom key store is disconnected, all attempts to create KMS keys
// in the custom key store or to use existing KMS keys in cryptographic operations
@@ -2408,16 +2992,13 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
// will fail. This action can prevent users from storing and accessing sensitive
// data.
//
+// When you disconnect a custom key store, its ConnectionState changes to Disconnected.
// To find the connection state of a custom key store, use the DescribeCustomKeyStores
// operation. To reconnect a custom key store, use the ConnectCustomKeyStore
// operation.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
@@ -2436,6 +3017,9 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
//
// - UpdateCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2452,17 +3036,27 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
@@ -2553,6 +3147,9 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out
//
// Related operations: DisableKey
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2571,8 +3168,8 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2587,10 +3184,18 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey
func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) {
req, out := c.EnableKeyRequest(input)
@@ -2657,31 +3262,38 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// EnableKeyRotation API operation for AWS Key Management Service.
//
-// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
+// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable)
// of the specified symmetric encryption KMS key.
//
-// When you enable automatic rotation of acustomer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
+// By default, when you enable automatic rotation of a customer managed KMS
+// key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
// KMS rotates the key material of the KMS key one year (approximately 365 days)
-// from the enable date and every year thereafter. You can monitor rotation
-// of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.
-// To disable rotation of the key material in a customer managed KMS key, use
-// the DisableKeyRotation operation.
+// from the enable date and every year thereafter. You can use the optional
+// RotationPeriodInDays parameter to specify a custom rotation period when you
+// enable key rotation, or you can use RotationPeriodInDays to modify the rotation
+// period of a key that you previously enabled automatic key rotation on.
+//
+// You can monitor rotation of the key material for your KMS keys in CloudTrail
+// and Amazon CloudWatch. To disable rotation of the key material in a customer
+// managed KMS key, use the DisableKeyRotation operation. You can use the GetKeyRotationStatus
+// operation to identify any in progress rotations. You can use the ListKeyRotations
+// operation to view the details of completed rotations.
//
// Automatic key rotation is supported only on symmetric encryption KMS keys
// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
-// You cannot enable or disable automatic rotation Amazon Web Services managed
+// You cannot enable or disable automatic rotation of Amazon Web Services managed
// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk).
// KMS always rotates the key material of Amazon Web Services managed keys every
// year. Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
-// varies.
+// is managed by the Amazon Web Services service that owns the key.
//
// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed
// keys from every three years (approximately 1,095 days) to every year (approximately
@@ -2709,6 +3321,15 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
//
// - GetKeyRotationStatus
//
+// - ListKeyRotations
+//
+// - RotateKeyOnDemand You can perform on-demand (RotateKeyOnDemand) rotation
+// of the key material in customer managed KMS keys, regardless of whether
+// or not automatic key rotation is enabled.
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2730,8 +3351,8 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -2741,10 +3362,18 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -2878,6 +3507,9 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
//
// - GenerateDataKeyPair
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2899,8 +3531,8 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -2913,7 +3545,8 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -2930,10 +3563,21 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt
func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) {
req, out := c.EncryptRequest(input)
@@ -3014,9 +3658,9 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data
// keys, use the KeySpec parameter.
//
-// To generate an SM4 data key (China Regions only), specify a KeySpec value
-// of AES_128 or NumberOfBytes value of 128. The symmetric encryption key used
-// in China Regions to encrypt your data key is an SM4 encryption key.
+// To generate a 128-bit SM4 data key (China Regions only), specify a KeySpec
+// value of AES_128 or a NumberOfBytes value of 16. The symmetric encryption
+// key used in China Regions to encrypt your data key is an SM4 encryption key.
//
// To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext.
// To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext
@@ -3029,11 +3673,18 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
// in the Key Management Service Developer Guide.
//
-// Applications in Amazon Web Services Nitro Enclaves can call this operation
-// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c).
-// For information about the supporting parameters, see How Amazon Web Services
-// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
-// in the Key Management Service Developer Guide.
+// GenerateDataKey also supports Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html),
+// which provide an isolated compute environment in Amazon EC2. To call GenerateDataKey
+// for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro
+// Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+// or any Amazon Web Services SDK. Use the Recipient parameter to provide the
+// attestation document for the enclave. GenerateDataKey returns a copy of the
+// data key encrypted under the specified KMS key, as usual. But instead of
+// a plaintext copy of the data key, the response includes a copy of the data
+// key encrypted under the public key from the attestation document (CiphertextForRecipient).
+// For information about the interaction between KMS and Amazon Web Services
+// Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// in the Key Management Service Developer Guide..
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -3085,6 +3736,9 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
//
// - GenerateDataKeyWithoutPlaintext
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3106,8 +3760,8 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3120,7 +3774,8 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -3137,10 +3792,21 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey
func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) {
req, out := c.GenerateDataKeyRequest(input)
@@ -3211,8 +3877,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// private key that is encrypted under the symmetric encryption KMS key you
// specify. You can use the data key pair to perform asymmetric cryptography
// and implement digital signatures outside of KMS. The bytes in the keys are
-// random; they not related to the caller or to the KMS key that is used to
-// encrypt the private key.
+// random; they are not related to the caller or to the KMS key that is used
+// to encrypt the private key.
//
// You can use the public key that GenerateDataKeyPair returns to encrypt data
// or verify a signature outside of KMS. Then, store the encrypted private key
@@ -3245,6 +3911,20 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC
// 5958 (https://tools.ietf.org/html/rfc5958).
//
+// GenerateDataKeyPair also supports Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html),
+// which provide an isolated compute environment in Amazon EC2. To call GenerateDataKeyPair
+// for an Amazon Web Services Nitro enclave, use the Amazon Web Services Nitro
+// Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+// or any Amazon Web Services SDK. Use the Recipient parameter to provide the
+// attestation document for the enclave. GenerateDataKeyPair returns the public
+// data key and a copy of the private data key encrypted under the specified
+// KMS key, as usual. But instead of a plaintext copy of the private data key
+// (PrivateKeyPlaintext), the response includes a copy of the private data key
+// encrypted under the public key from the attestation document (CiphertextForRecipient).
+// For information about the interaction between KMS and Amazon Web Services
+// Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// in the Key Management Service Developer Guide..
+//
// You can use an optional encryption context to add additional security to
// the encryption operation. If you specify an EncryptionContext, you must specify
// the same encryption context (a case-sensitive exact match) when decrypting
@@ -3275,6 +3955,9 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
//
// - GenerateDataKeyWithoutPlaintext
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3296,8 +3979,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3310,7 +3993,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -3327,14 +4011,25 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair
func (c *KMS) GenerateDataKeyPair(input *GenerateDataKeyPairInput) (*GenerateDataKeyPairOutput, error) {
req, out := c.GenerateDataKeyPairRequest(input)
@@ -3458,6 +4153,9 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
//
// - GenerateDataKeyWithoutPlaintext
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3479,8 +4177,8 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3493,7 +4191,8 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -3510,14 +4209,25 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext
func (c *KMS) GenerateDataKeyPairWithoutPlaintext(input *GenerateDataKeyPairWithoutPlaintextInput) (*GenerateDataKeyPairWithoutPlaintextOutput, error) {
req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input)
@@ -3612,6 +4322,14 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// or a key in a custom key store to generate a data key. To get the type of
// your KMS key, use the DescribeKey operation.
//
+// You must also specify the length of the data key. Use either the KeySpec
+// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data
+// keys, use the KeySpec parameter.
+//
+// To generate an SM4 data key (China Regions only), specify a KeySpec value
+// of AES_128 or NumberOfBytes value of 16. The symmetric encryption key used
+// in China Regions to encrypt your data key is an SM4 encryption key.
+//
// If the operation succeeds, you will find the encrypted copy of the data key
// in the CiphertextBlob field.
//
@@ -3645,6 +4363,9 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
//
// - GenerateDataKeyPairWithoutPlaintext
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3666,8 +4387,8 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -3680,7 +4401,8 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -3697,10 +4419,21 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext
func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) {
req, out := c.GenerateDataKeyWithoutPlaintextRequest(input)
@@ -3767,15 +4500,18 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// GenerateMac API operation for AWS Key Management Service.
//
// Generates a hash-based message authentication code (HMAC) for a message using
-// an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm
-// computes the HMAC for the message and the key as described in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
-//
-// You can use the HMAC that this operation generates with the VerifyMac operation
-// to demonstrate that the original message has not changed. Also, because a
-// secret key is used to create the hash, you can verify that the party that
-// generated the hash has the required secret key. This operation is part of
-// KMS support for HMAC KMS keys. For details, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
-// in the Key Management Service Developer Guide .
+// an HMAC KMS key and a MAC algorithm that the key supports. HMAC KMS keys
+// and the HMAC algorithms that KMS uses conform to industry standards defined
+// in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
+//
+// You can use value that GenerateMac returns in the VerifyMac operation to
+// demonstrate that the original message has not changed. Also, because a secret
+// key is used to create the hash, you can verify that the party that generated
+// the hash has the required secret key. You can also use the raw result to
+// implement HMAC-based algorithms such as key derivation functions. This operation
+// is part of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the
+// Key Management Service Developer Guide .
//
// Best practices recommend that you limit the time during which any signing
// mechanism, including an HMAC, is effective. This deters an attack where the
@@ -3797,6 +4533,9 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
//
// Related operations: VerifyMac
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3828,7 +4567,8 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -3845,10 +4585,21 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMac
func (c *KMS) GenerateMac(input *GenerateMacInput) (*GenerateMacOutput, error) {
req, out := c.GenerateMacRequest(input)
@@ -3920,14 +4671,18 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
// byte string. There is no default value for string length.
//
// By default, the random byte string is generated in KMS. To generate the byte
-// string in the CloudHSM cluster that is associated with a custom key store
-// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// specify the custom key store ID.
-//
-// Applications in Amazon Web Services Nitro Enclaves can call this operation
-// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c).
-// For information about the supporting parameters, see How Amazon Web Services
-// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// string in the CloudHSM cluster associated with an CloudHSM key store, use
+// the CustomKeyStoreId parameter.
+//
+// GenerateRandom also supports Amazon Web Services Nitro Enclaves (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave.html),
+// which provide an isolated compute environment in Amazon EC2. To call GenerateRandom
+// for a Nitro enclave, use the Amazon Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+// or any Amazon Web Services SDK. Use the Recipient parameter to provide the
+// attestation document for the enclave. Instead of plaintext bytes, the response
+// includes the plaintext bytes encrypted under the public key from the attestation
+// document (CiphertextForRecipient).For information about the interaction between
+// KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services Nitro
+// Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
// in the Key Management Service Developer Guide.
//
// For more information about entropy and random number generation, see Key
@@ -3939,6 +4694,9 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
// Required permissions: kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (IAM policy)
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3949,13 +4707,17 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
//
+// - UnsupportedOperationException
+// The request was rejected because a specified parameter is not supported or
+// a specified resource is not valid for this operation.
+//
// - CustomKeyStoreNotFoundException
// The request was rejected because KMS cannot find a custom key store with
// the specified key store name or ID.
@@ -3967,17 +4729,27 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom
func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) {
@@ -4052,7 +4824,10 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques
// Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (key policy)
//
-// Related operations: PutKeyPolicy
+// Related operations: PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4072,8 +4847,8 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -4083,10 +4858,18 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy
func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) {
req, out := c.GetKeyPolicyRequest(input)
@@ -4152,23 +4935,19 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// GetKeyRotationStatus API operation for AWS Key Management Service.
//
-// Gets a Boolean value that indicates whether automatic rotation of the key
-// material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
-// is enabled for the specified KMS key.
-//
-// When you enable automatic rotation for customer managed KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk),
-// KMS rotates the key material of the KMS key one year (approximately 365 days)
-// from the enable date and every year thereafter. You can monitor rotation
-// of the key material for your KMS keys in CloudTrail and Amazon CloudWatch.
+// Provides detailed information about the rotation status for a KMS key, including
+// whether automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
+// is enabled for the specified KMS key, the rotation period (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotation-period),
+// and the next scheduled rotation date.
//
// Automatic key rotation is supported only on symmetric encryption KMS keys
// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
-// You cannot enable or disable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
-// The key rotation status of these KMS keys is always false. To enable or disable
-// automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// To enable or disable automatic rotation of a set of related multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key..
//
// You can enable (EnableKeyRotation) and disable automatic rotation (DisableKeyRotation)
@@ -4178,6 +4957,12 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// managed KMS keys every year. The key rotation status for Amazon Web Services
// managed KMS keys is always true.
//
+// You can perform on-demand (RotateKeyOnDemand) rotation of the key material
+// in customer managed KMS keys, regardless of whether or not automatic key
+// rotation is enabled. You can use GetKeyRotationStatus to identify the date
+// and time that an in progress on-demand rotation was initiated. You can use
+// ListKeyRotations to view the details of completed rotations.
+//
// In May 2022, KMS changed the rotation schedule for Amazon Web Services managed
// keys from every three years to every year. For details, see EnableKeyRotation.
//
@@ -4210,6 +4995,13 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
//
// - EnableKeyRotation
//
+// - ListKeyRotations
+//
+// - RotateKeyOnDemand
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4228,8 +5020,8 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -4239,10 +5031,18 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
@@ -4312,27 +5112,56 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// GetParametersForImport API operation for AWS Key Management Service.
//
-// Returns the items you need to import key material into a symmetric encryption
-// KMS key. For more information about importing key material into KMS, see
-// Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+// Returns the public key and an import token you need to import or reimport
+// key material for a KMS key.
+//
+// By default, KMS keys are created with key material that KMS generates. This
+// operation supports Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
+// an advanced feature that lets you generate and import the cryptographic key
+// material for a KMS key. For more information about importing key material
+// into KMS, see Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
// in the Key Management Service Developer Guide.
//
-// This operation returns a public key and an import token. Use the public key
-// to encrypt the symmetric key material. Store the import token to send with
-// a subsequent ImportKeyMaterial request.
+// Before calling GetParametersForImport, use the CreateKey operation with an
+// Origin value of EXTERNAL to create a KMS key with no key material. You can
+// import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric
+// encryption KMS key, or asymmetric signing KMS key. You can also import key
+// material into a multi-Region key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// of any supported type. However, you can't import key material into a KMS
+// key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+// You can also use GetParametersForImport to get a public key and import token
+// to reimport the original key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
+// into a KMS key whose key material expired or was deleted.
//
-// You must specify the key ID of the symmetric encryption KMS key into which
-// you will import key material. This KMS key's Origin must be EXTERNAL. You
-// must also specify the wrapping algorithm and type of wrapping key (public
-// key) that you will use to encrypt the key material. You cannot perform this
-// operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in
-// a different Amazon Web Services account.
+// GetParametersForImport returns the items that you need to import your key
+// material.
+//
+// - The public key (or "wrapping key") of an RSA key pair that KMS generates.
+// You will use this public key to encrypt ("wrap") your key material while
+// it's in transit to KMS.
//
-// To import key material, you must use the public key and import token from
-// the same response. These items are valid for 24 hours. The expiration date
-// and time appear in the GetParametersForImport response. You cannot use an
-// expired token in an ImportKeyMaterial request. If your key and token expire,
-// send another GetParametersForImport request.
+// - A import token that ensures that KMS can decrypt your key material and
+// associate it with the correct KMS key.
+//
+// The public key and its import token are permanently linked and must be used
+// together. Each public key and import token set is valid for 24 hours. The
+// expiration date and time appear in the ParametersValidTo field in the GetParametersForImport
+// response. You cannot use an expired public key or import token in an ImportKeyMaterial
+// request. If your key and token expire, send another GetParametersForImport
+// request.
+//
+// GetParametersForImport requires the following information:
+//
+// - The key ID of the KMS key for which you are importing the key material.
+//
+// - The key spec of the public key ("wrapping key") that you will use to
+// encrypt your key material during import.
+//
+// - The wrapping algorithm that you will use with the public key to encrypt
+// your key material.
+//
+// You can use the same or a different public key spec and wrapping algorithm
+// each time you import or reimport the same key material.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -4350,6 +5179,9 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
//
// - DeleteImportedKeyMaterial
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4368,8 +5200,8 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -4383,10 +5215,18 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput)
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport
func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) {
req, out := c.GetParametersForImportRequest(input)
@@ -4467,11 +5307,6 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// are part of every KMS operation. You also reduce of risk of encrypting data
// that cannot be decrypted. These features are not effective outside of KMS.
//
-// To verify a signature outside of KMS with an SM2 public key (China Regions
-// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
-// as the distinguishing ID. For more information, see Offline verification
-// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
-//
// To help you use the public key safely outside of KMS, GetPublicKey returns
// important information about the public key in the response, including:
//
@@ -4479,7 +5314,8 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.
//
// - KeyUsage (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage):
-// Whether the key is used for encryption or signing.
+// Whether the key is used for encryption, signing, or deriving a shared
+// secret.
//
// - EncryptionAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms)
// or SigningAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms):
@@ -4493,6 +5329,11 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// algorithm that is not supported by KMS. You can also avoid errors, such as
// using the wrong signing algorithm in a verification operation.
//
+// To verify a signature outside of KMS with an SM2 public key (China Regions
+// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
+// as the distinguishing ID. For more information, see Offline verification
+// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
+//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide.
@@ -4506,6 +5347,9 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
//
// Related operations: CreateKey
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4527,8 +5371,8 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
@@ -4552,7 +5396,8 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -4566,10 +5411,18 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey
func (c *KMS) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) {
req, out := c.GetPublicKeyRequest(input)
@@ -4636,42 +5489,81 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// ImportKeyMaterial API operation for AWS Key Management Service.
//
-// Imports key material into an existing symmetric encryption KMS key that was
-// created without key material. After you successfully import key material
-// into a KMS key, you can reimport the same key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
-// into that KMS key, but you cannot import different key material.
+// Imports or reimports key material into an existing KMS key that was created
+// without key material. ImportKeyMaterial also sets the expiration model and
+// expiration date of the imported key material.
//
-// You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key,
-// or on any KMS key in a different Amazon Web Services account. For more information
-// about creating KMS keys with no key material and then importing key material,
-// see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+// By default, KMS keys are created with key material that KMS generates. This
+// operation supports Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
+// an advanced feature that lets you generate and import the cryptographic key
+// material for a KMS key. For more information about importing key material
+// into KMS, see Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
// in the Key Management Service Developer Guide.
//
-// Before using this operation, call GetParametersForImport. Its response includes
-// a public key and an import token. Use the public key to encrypt the key material.
-// Then, submit the import token from the same GetParametersForImport response.
-//
-// When calling this operation, you must specify the following values:
-//
-// - The key ID or key ARN of a KMS key with no key material. Its Origin
-// must be EXTERNAL. To create a KMS key with no key material, call CreateKey
-// and set the value of its Origin parameter to EXTERNAL. To get the Origin
-// of a KMS key, call DescribeKey.)
-//
-// - The encrypted key material. To get the public key to encrypt the key
-// material, call GetParametersForImport.
+// After you successfully import key material into a KMS key, you can reimport
+// the same key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
+// into that KMS key, but you cannot import different key material. You might
+// reimport key material to replace key material that expired or key material
+// that you deleted. You might also reimport key material to change the expiration
+// model or expiration date of the key material.
+//
+// Each time you import key material into KMS, you can determine whether (ExpirationModel)
+// and when (ValidTo) the key material expires. To change the expiration of
+// your key material, you must import it again, either by calling ImportKeyMaterial
+// or using the import features (kms/latest/developerguide/importing-keys-import-key-material.html#importing-keys-import-key-material-console)
+// of the KMS console.
+//
+// Before calling ImportKeyMaterial:
+//
+// - Create or identify a KMS key with no key material. The KMS key must
+// have an Origin value of EXTERNAL, which indicates that the KMS key is
+// designed for imported key material. To create an new KMS key for imported
+// key material, call the CreateKey operation with an Origin value of EXTERNAL.
+// You can create a symmetric encryption KMS key, HMAC KMS key, asymmetric
+// encryption KMS key, or asymmetric signing KMS key. You can also import
+// key material into a multi-Region key (kms/latest/developerguide/multi-region-keys-overview.html)
+// of any supported type. However, you can't import key material into a KMS
+// key in a custom key store (kms/latest/developerguide/custom-key-store-overview.html).
+//
+// - Use the DescribeKey operation to verify that the KeyState of the KMS
+// key is PendingImport, which indicates that the KMS key has no key material.
+// If you are reimporting the same key material into an existing KMS key,
+// you might need to call the DeleteImportedKeyMaterial to delete its existing
+// key material.
+//
+// - Call the GetParametersForImport operation to get a public key and import
+// token set for importing key material.
+//
+// - Use the public key in the GetParametersForImport response to encrypt
+// your key material.
+//
+// Then, in an ImportKeyMaterial request, you submit your encrypted key material
+// and import token. When calling this operation, you must specify the following
+// values:
+//
+// - The key ID or key ARN of the KMS key to associate with the imported
+// key material. Its Origin must be EXTERNAL and its KeyState must be PendingImport.
+// You cannot perform this operation on a KMS key in a custom key store (kms/latest/developerguide/custom-key-store-overview.html),
+// or on a KMS key in a different Amazon Web Services account. To get the
+// Origin and KeyState of a KMS key, call DescribeKey.
+//
+// - The encrypted key material.
//
// - The import token that GetParametersForImport returned. You must use
// a public key and token from the same GetParametersForImport response.
//
-// - Whether the key material expires and if so, when. If you set an expiration
+// - Whether the key material expires (ExpirationModel) and, if so, when
+// (ValidTo). For help with this choice, see Setting an expiration time (https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration)
+// in the Key Management Service Developer Guide. If you set an expiration
// date, KMS deletes the key material from the KMS key on the specified date,
-// and the KMS key becomes unusable. To use the KMS key again, you must reimport
-// the same key material. The only way to change an expiration date is by
-// reimporting the same key material and specifying a new expiration date.
+// making the KMS key unusable. To use the KMS key in cryptographic operations
+// again, you must reimport the same key material. However, you can delete
+// and reimport the key material at any time, including before the key material
+// expires. Each time you reimport, you can eliminate or reset the expiration
+// time.
//
// When this operation is successful, the key state of the KMS key changes from
-// PendingImport to Enabled, and you can use the KMS key.
+// PendingImport to Enabled, and you can use the KMS key in cryptographic operations.
//
// If this operation fails, use the exception to help determine the problem.
// If the error is related to the key material, the import token, or wrapping
@@ -4696,6 +5588,9 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
//
// - GetParametersForImport
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4714,8 +5609,8 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// a specified resource is not valid for this operation.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -4729,10 +5624,18 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InvalidCiphertextException
// From the Decrypt or ReEncrypt operation, the request was rejected because
// the specified ciphertext, or additional authenticated data incorporated into
@@ -4863,6 +5766,9 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request,
//
// - UpdateAlias
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4873,8 +5779,8 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -5047,6 +5953,9 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
//
// - RevokeGrant
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5061,8 +5970,8 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -5083,10 +5992,18 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants
func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) {
req, out := c.ListGrantsRequest(input)
@@ -5223,7 +6140,10 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
//
// - GetKeyPolicy
//
-// - PutKeyPolicy
+// - PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5243,8 +6163,8 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -5254,10 +6174,18 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies
func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) {
req, out := c.ListKeyPoliciesRequest(input)
@@ -5331,34 +6259,34 @@ func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPol
return p.Err()
}
-const opListKeys = "ListKeys"
+const opListKeyRotations = "ListKeyRotations"
-// ListKeysRequest generates a "aws/request.Request" representing the
-// client's request for the ListKeys operation. The "output" return
+// ListKeyRotationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListKeyRotations operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See ListKeys for more information on using the ListKeys
+// See ListKeyRotations for more information on using the ListKeyRotations
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// // Example sending a request using the ListKeysRequest method.
-// req, resp := client.ListKeysRequest(params)
+// // Example sending a request using the ListKeyRotationsRequest method.
+// req, resp := client.ListKeyRotationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys
-func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyRotations
+func (c *KMS) ListKeyRotationsRequest(input *ListKeyRotationsInput) (req *request.Request, output *ListKeyRotationsOutput) {
op := &request.Operation{
- Name: opListKeys,
+ Name: opListKeyRotations,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
@@ -5370,114 +6298,148 @@ func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, outpu
}
if input == nil {
- input = &ListKeysInput{}
+ input = &ListKeyRotationsInput{}
}
- output = &ListKeysOutput{}
+ output = &ListKeyRotationsOutput{}
req = c.newRequest(op, input, output)
return
}
-// ListKeys API operation for AWS Key Management Service.
+// ListKeyRotations API operation for AWS Key Management Service.
//
-// Gets a list of all KMS keys in the caller's Amazon Web Services account and
-// Region.
+// Returns information about all completed key material rotations for the specified
+// KMS key.
+//
+// You must specify the KMS key in all requests. You can refine the key rotations
+// list by limiting the number of rotations returned.
+//
+// For detailed information about automatic and on-demand key rotations, see
+// Rotating KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
+// in the Key Management Service Developer Guide.
//
// Cross-account use: No. You cannot perform this operation on a KMS key in
// a different Amazon Web Services account.
//
-// Required permissions: kms:ListKeys (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (IAM policy)
+// Required permissions: kms:ListKeyRotations (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy)
//
// Related operations:
//
-// - CreateKey
+// - EnableKeyRotation
//
-// - DescribeKey
+// - DisableKeyRotation
//
-// - ListAliases
+// - GetKeyRotationStatus
//
-// - ListResourceTags
+// - RotateKeyOnDemand
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Key Management Service's
-// API operation ListKeys for usage and error information.
+// API operation ListKeyRotations for usage and error information.
//
// Returned Error Types:
//
-// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// - NotFoundException
+// The request was rejected because the specified entity or resource could not
+// be found.
//
-// - InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
+// - InvalidArnException
+// The request was rejected because a specified ARN, or an ARN in a key policy,
+// is not valid.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
// should next begin is not valid.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys
-func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) {
- req, out := c.ListKeysRequest(input)
+// - InternalException
+// The request was rejected because an internal exception occurred. The request
+// can be retried.
+//
+// - InvalidStateException
+// The request was rejected because the state of the specified resource is not
+// valid for this request.
+//
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide .
+//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - UnsupportedOperationException
+// The request was rejected because a specified parameter is not supported or
+// a specified resource is not valid for this operation.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyRotations
+func (c *KMS) ListKeyRotations(input *ListKeyRotationsInput) (*ListKeyRotationsOutput, error) {
+ req, out := c.ListKeyRotationsRequest(input)
return out, req.Send()
}
-// ListKeysWithContext is the same as ListKeys with the addition of
+// ListKeyRotationsWithContext is the same as ListKeyRotations with the addition of
// the ability to pass a context and additional request options.
//
-// See ListKeys for details on how to use this API operation.
+// See ListKeyRotations for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts ...request.Option) (*ListKeysOutput, error) {
- req, out := c.ListKeysRequest(input)
+func (c *KMS) ListKeyRotationsWithContext(ctx aws.Context, input *ListKeyRotationsInput, opts ...request.Option) (*ListKeyRotationsOutput, error) {
+ req, out := c.ListKeyRotationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
-// ListKeysPages iterates over the pages of a ListKeys operation,
+// ListKeyRotationsPages iterates over the pages of a ListKeyRotations operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
-// See ListKeys method for more information on how to use this operation.
+// See ListKeyRotations method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
-// // Example iterating over at most 3 pages of a ListKeys operation.
+// // Example iterating over at most 3 pages of a ListKeyRotations operation.
// pageNum := 0
-// err := client.ListKeysPages(params,
-// func(page *kms.ListKeysOutput, lastPage bool) bool {
+// err := client.ListKeyRotationsPages(params,
+// func(page *kms.ListKeyRotationsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
-func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(*ListKeysOutput, bool) bool) error {
- return c.ListKeysPagesWithContext(aws.BackgroundContext(), input, fn)
+func (c *KMS) ListKeyRotationsPages(input *ListKeyRotationsInput, fn func(*ListKeyRotationsOutput, bool) bool) error {
+ return c.ListKeyRotationsPagesWithContext(aws.BackgroundContext(), input, fn)
}
-// ListKeysPagesWithContext same as ListKeysPages except
+// ListKeyRotationsPagesWithContext same as ListKeyRotationsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn func(*ListKeysOutput, bool) bool, opts ...request.Option) error {
+func (c *KMS) ListKeyRotationsPagesWithContext(ctx aws.Context, input *ListKeyRotationsInput, fn func(*ListKeyRotationsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
- var inCpy *ListKeysInput
+ var inCpy *ListKeyRotationsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
- req, _ := c.ListKeysRequest(inCpy)
+ req, _ := c.ListKeyRotationsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
@@ -5485,7 +6447,7 @@ func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn
}
for p.Next() {
- if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) {
+ if !fn(p.Page().(*ListKeyRotationsOutput), !p.HasNextPage()) {
break
}
}
@@ -5493,34 +6455,34 @@ func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn
return p.Err()
}
-const opListResourceTags = "ListResourceTags"
+const opListKeys = "ListKeys"
-// ListResourceTagsRequest generates a "aws/request.Request" representing the
-// client's request for the ListResourceTags operation. The "output" return
+// ListKeysRequest generates a "aws/request.Request" representing the
+// client's request for the ListKeys operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See ListResourceTags for more information on using the ListResourceTags
+// See ListKeys for more information on using the ListKeys
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// // Example sending a request using the ListResourceTagsRequest method.
-// req, resp := client.ListResourceTagsRequest(params)
+// // Example sending a request using the ListKeysRequest method.
+// req, resp := client.ListKeysRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags
-func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *request.Request, output *ListResourceTagsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys
+func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) {
op := &request.Operation{
- Name: opListResourceTags,
+ Name: opListKeys,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
@@ -5532,81 +6494,249 @@ func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *reques
}
if input == nil {
- input = &ListResourceTagsInput{}
+ input = &ListKeysInput{}
}
- output = &ListResourceTagsOutput{}
+ output = &ListKeysOutput{}
req = c.newRequest(op, input, output)
return
}
-// ListResourceTags API operation for AWS Key Management Service.
-//
-// Returns all tags on the specified KMS key.
+// ListKeys API operation for AWS Key Management Service.
//
-// For general information about tags, including the format and syntax, see
-// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
-// in the Amazon Web Services General Reference. For information about using
-// tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html).
+// Gets a list of all KMS keys in the caller's Amazon Web Services account and
+// Region.
//
// Cross-account use: No. You cannot perform this operation on a KMS key in
// a different Amazon Web Services account.
//
-// Required permissions: kms:ListResourceTags (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy)
+// Required permissions: kms:ListKeys (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy)
//
// Related operations:
//
// - CreateKey
//
-// - ReplicateKey
+// - DescribeKey
//
-// - TagResource
+// - ListAliases
//
-// - UntagResource
+// - ListResourceTags
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Key Management Service's
-// API operation ListResourceTags for usage and error information.
+// API operation ListKeys for usage and error information.
//
// Returned Error Types:
//
+// - DependencyTimeoutException
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
+//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
//
-// - NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// - InvalidArnException
-// The request was rejected because a specified ARN, or an ARN in a key policy,
-// is not valid.
-//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
// should next begin is not valid.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags
-func (c *KMS) ListResourceTags(input *ListResourceTagsInput) (*ListResourceTagsOutput, error) {
- req, out := c.ListResourceTagsRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys
+func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) {
+ req, out := c.ListKeysRequest(input)
return out, req.Send()
}
-// ListResourceTagsWithContext is the same as ListResourceTags with the addition of
+// ListKeysWithContext is the same as ListKeys with the addition of
// the ability to pass a context and additional request options.
//
-// See ListResourceTags for details on how to use this API operation.
+// See ListKeys for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
-func (c *KMS) ListResourceTagsWithContext(ctx aws.Context, input *ListResourceTagsInput, opts ...request.Option) (*ListResourceTagsOutput, error) {
- req, out := c.ListResourceTagsRequest(input)
+func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts ...request.Option) (*ListKeysOutput, error) {
+ req, out := c.ListKeysRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListKeysPages iterates over the pages of a ListKeys operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListKeys method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListKeys operation.
+// pageNum := 0
+// err := client.ListKeysPages(params,
+// func(page *kms.ListKeysOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(*ListKeysOutput, bool) bool) error {
+ return c.ListKeysPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListKeysPagesWithContext same as ListKeysPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn func(*ListKeysOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListKeysInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListKeysRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListResourceTags = "ListResourceTags"
+
+// ListResourceTagsRequest generates a "aws/request.Request" representing the
+// client's request for the ListResourceTags operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListResourceTags for more information on using the ListResourceTags
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ListResourceTagsRequest method.
+// req, resp := client.ListResourceTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags
+func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *request.Request, output *ListResourceTagsOutput) {
+ op := &request.Operation{
+ Name: opListResourceTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker"},
+ LimitToken: "Limit",
+ TruncationToken: "Truncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListResourceTagsInput{}
+ }
+
+ output = &ListResourceTagsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListResourceTags API operation for AWS Key Management Service.
+//
+// Returns all tags on the specified KMS key.
+//
+// For general information about tags, including the format and syntax, see
+// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
+// in the Amazon Web Services General Reference. For information about using
+// tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html).
+//
+// Cross-account use: No. You cannot perform this operation on a KMS key in
+// a different Amazon Web Services account.
+//
+// Required permissions: kms:ListResourceTags (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy)
+//
+// Related operations:
+//
+// - CreateKey
+//
+// - ReplicateKey
+//
+// - TagResource
+//
+// - UntagResource
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Key Management Service's
+// API operation ListResourceTags for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalException
+// The request was rejected because an internal exception occurred. The request
+// can be retried.
+//
+// - NotFoundException
+// The request was rejected because the specified entity or resource could not
+// be found.
+//
+// - InvalidArnException
+// The request was rejected because a specified ARN, or an ARN in a key policy,
+// is not valid.
+//
+// - InvalidMarkerException
+// The request was rejected because the marker that specifies where pagination
+// should next begin is not valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags
+func (c *KMS) ListResourceTags(input *ListResourceTagsInput) (*ListResourceTagsOutput, error) {
+ req, out := c.ListResourceTagsRequest(input)
+ return out, req.Send()
+}
+
+// ListResourceTagsWithContext is the same as ListResourceTags with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListResourceTags for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *KMS) ListResourceTagsWithContext(ctx aws.Context, input *ListResourceTagsInput, opts ...request.Option) (*ListResourceTagsOutput, error) {
+ req, out := c.ListResourceTagsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
@@ -5727,14 +6857,22 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *
// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html).
//
// Cross-account use: You must specify a principal in your Amazon Web Services
-// account. However, this operation can return grants in any Amazon Web Services
-// account. You do not need kms:ListRetirableGrants permission (or any other
-// additional permission) in any Amazon Web Services account other than your
-// own.
+// account. This operation returns a list of grants where the retiring principal
+// specified in the ListRetirableGrants request is the same retiring principal
+// on the grant. This can include grants on KMS keys owned by other Amazon Web
+// Services accounts, but you do not need kms:ListRetirableGrants permission
+// (or any other additional permission) in any Amazon Web Services account other
+// than your own.
//
// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
// (IAM policy) in your Amazon Web Services account.
//
+// KMS authorizes ListRetirableGrants requests by evaluating the caller account's
+// kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants
+// calls is the retiring principal specified in the request. KMS does not evaluate
+// the caller's permissions to verify their access to any KMS keys or grants
+// that might be returned by the ListRetirableGrants call.
+//
// Related operations:
//
// - CreateGrant
@@ -5745,6 +6883,9 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *
//
// - RevokeGrant
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5755,8 +6896,8 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidMarkerException
// The request was rejected because the marker that specifies where pagination
@@ -5909,6 +7050,9 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques
//
// Related operations: GetKeyPolicy
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -5931,8 +7075,8 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques
// or semantically correct.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - UnsupportedOperationException
// The request was rejected because a specified parameter is not supported or
@@ -5951,10 +7095,18 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy
func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) {
req, out := c.PutKeyPolicyRequest(input)
@@ -6056,20 +7208,20 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// was encrypted under a different KMS key, the ReEncrypt operation fails.
// This practice ensures that you use the KMS key that you intend.
//
-// - To reencrypt the data, you must use the DestinationKeyId parameter specify
-// the KMS key that re-encrypts the data after it is decrypted. If the destination
-// KMS key is an asymmetric KMS key, you must also provide the encryption
-// algorithm. The algorithm that you choose must be compatible with the KMS
-// key. When you use an asymmetric KMS key to encrypt or reencrypt data,
-// be sure to record the KMS key and encryption algorithm that you choose.
-// You will be required to provide the same KMS key and encryption algorithm
-// when you decrypt the data. If the KMS key and algorithm do not match the
-// values used to encrypt the data, the decrypt operation fails. You are
-// not required to supply the key ID and encryption algorithm when you decrypt
-// with symmetric encryption KMS keys because KMS stores this information
-// in the ciphertext blob. KMS cannot store metadata in ciphertext generated
-// with asymmetric keys. The standard format for asymmetric key ciphertext
-// does not include configurable fields.
+// - To reencrypt the data, you must use the DestinationKeyId parameter to
+// specify the KMS key that re-encrypts the data after it is decrypted. If
+// the destination KMS key is an asymmetric KMS key, you must also provide
+// the encryption algorithm. The algorithm that you choose must be compatible
+// with the KMS key. When you use an asymmetric KMS key to encrypt or reencrypt
+// data, be sure to record the KMS key and encryption algorithm that you
+// choose. You will be required to provide the same KMS key and encryption
+// algorithm when you decrypt the data. If the KMS key and algorithm do not
+// match the values used to encrypt the data, the decrypt operation fails.
+// You are not required to supply the key ID and encryption algorithm when
+// you decrypt with symmetric encryption KMS keys because KMS stores this
+// information in the ciphertext blob. KMS cannot store metadata in ciphertext
+// generated with asymmetric keys. The standard format for asymmetric key
+// ciphertext does not include configurable fields.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -6105,6 +7257,9 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
//
// - GenerateDataKeyPair
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6140,8 +7295,8 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// must identify the same KMS key that was used to encrypt the ciphertext.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -6154,7 +7309,8 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -6171,10 +7327,21 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt
func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) {
req, out := c.ReEncryptRequest(input)
@@ -6297,7 +7464,7 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques
// If you replicate a multi-Region primary key with imported key material, the
// replica key is created with no key material. You must import the same key
// material that you imported into the primary key. For details, see Importing
-// key material into multi-Region keys (kms/latest/developerguide/multi-region-keys-import.html)
+// key material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html)
// in the Key Management Service Developer Guide.
//
// To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.
@@ -6324,6 +7491,9 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques
//
// - UpdatePrimaryRegion
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6348,10 +7518,18 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
@@ -6462,7 +7640,7 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// Cross-account use: Yes. You can retire a grant on a KMS key in a different
// Amazon Web Services account.
//
-// Required permissions::Permission to retire a grant is determined primarily
+// Required permissions: Permission to retire a grant is determined primarily
// by the grant. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
// in the Key Management Service Developer Guide.
//
@@ -6476,6 +7654,9 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
//
// - RevokeGrant
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6500,8 +7681,8 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -6511,10 +7692,21 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant
func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) {
req, out := c.RetireGrantRequest(input)
@@ -6583,7 +7775,7 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
//
// Deletes the specified grant. You revoke a grant to terminate the permissions
// that the grant allows. For more information, see Retiring and revoking grants
-// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete)
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
// in the Key Management Service Developer Guide .
//
// When you create, retire, or revoke a grant, there might be a brief delay,
@@ -6614,6 +7806,9 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
//
// - RetireGrant
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6628,8 +7823,8 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
// be found.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
@@ -6646,10 +7841,21 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant
func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) {
req, out := c.RevokeGrantRequest(input)
@@ -6672,85 +7878,85 @@ func (c *KMS) RevokeGrantWithContext(ctx aws.Context, input *RevokeGrantInput, o
return out, req.Send()
}
-const opScheduleKeyDeletion = "ScheduleKeyDeletion"
+const opRotateKeyOnDemand = "RotateKeyOnDemand"
-// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the
-// client's request for the ScheduleKeyDeletion operation. The "output" return
+// RotateKeyOnDemandRequest generates a "aws/request.Request" representing the
+// client's request for the RotateKeyOnDemand operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
-// See ScheduleKeyDeletion for more information on using the ScheduleKeyDeletion
+// See RotateKeyOnDemand for more information on using the RotateKeyOnDemand
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// // Example sending a request using the ScheduleKeyDeletionRequest method.
-// req, resp := client.ScheduleKeyDeletionRequest(params)
+// // Example sending a request using the RotateKeyOnDemandRequest method.
+// req, resp := client.RotateKeyOnDemandRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
-func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RotateKeyOnDemand
+func (c *KMS) RotateKeyOnDemandRequest(input *RotateKeyOnDemandInput) (req *request.Request, output *RotateKeyOnDemandOutput) {
op := &request.Operation{
- Name: opScheduleKeyDeletion,
+ Name: opRotateKeyOnDemand,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
- input = &ScheduleKeyDeletionInput{}
+ input = &RotateKeyOnDemandInput{}
}
- output = &ScheduleKeyDeletionOutput{}
+ output = &RotateKeyOnDemandOutput{}
req = c.newRequest(op, input, output)
return
}
-// ScheduleKeyDeletion API operation for AWS Key Management Service.
+// RotateKeyOnDemand API operation for AWS Key Management Service.
//
-// Schedules the deletion of a KMS key. By default, KMS applies a waiting period
-// of 30 days, but you can specify a waiting period of 7-30 days. When this
-// operation is successful, the key state of the KMS key changes to PendingDeletion
-// and the key can't be used in any cryptographic operations. It remains in
-// this state for the duration of the waiting period. Before the waiting period
-// ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key.
-// After the waiting period ends, KMS deletes the KMS key, its key material,
-// and all KMS data associated with it, including all aliases that refer to
-// it.
+// Immediately initiates rotation of the key material of the specified symmetric
+// encryption KMS key.
//
-// Deleting a KMS key is a destructive and potentially dangerous operation.
-// When a KMS key is deleted, all data that was encrypted under the KMS key
-// is unrecoverable. (The only exception is a multi-Region replica key.) To
-// prevent the use of a KMS key without deleting it, use DisableKey.
+// You can perform on-demand rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-on-demand)
+// of the key material in customer managed KMS keys, regardless of whether or
+// not automatic key rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable)
+// is enabled. On-demand rotations do not change existing automatic rotation
+// schedules. For example, consider a KMS key that has automatic key rotation
+// enabled with a rotation period of 730 days. If the key is scheduled to automatically
+// rotate on April 14, 2024, and you perform an on-demand rotation on April
+// 10, 2024, the key will automatically rotate, as scheduled, on April 14, 2024
+// and every 730 days thereafter.
//
-// If you schedule deletion of a KMS key from a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// when the waiting period expires, ScheduleKeyDeletion deletes the KMS key
-// from KMS. Then KMS makes a best effort to delete the key material from the
-// associated CloudHSM cluster. However, you might need to manually delete the
-// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups.
+// You can perform on-demand key rotation a maximum of 10 times per KMS key.
+// You can use the KMS console to view the number of remaining on-demand rotations
+// available for a KMS key.
//
-// You can schedule the deletion of a multi-Region primary key and its replica
-// keys at any time. However, KMS will not delete a multi-Region primary key
-// with existing replica keys. If you schedule the deletion of a primary key
-// with replicas, its key state changes to PendingReplicaDeletion and it cannot
-// be replicated or used in cryptographic operations. This status can continue
-// indefinitely. When the last of its replicas keys is deleted (not just scheduled),
-// the key state of the primary key changes to PendingDeletion and its waiting
-// period (PendingWindowInDays) begins. For details, see Deleting multi-Region
-// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
-// in the Key Management Service Developer Guide.
+// You can use GetKeyRotationStatus to identify any in progress on-demand rotations.
+// You can use ListKeyRotations to identify the date that completed on-demand
+// rotations were performed. You can monitor rotation of the key material for
+// your KMS keys in CloudTrail and Amazon CloudWatch.
//
-// For more information about scheduling a KMS key for deletion, see Deleting
-// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
-// in the Key Management Service Developer Guide.
+// On-demand key rotation is supported only on symmetric encryption KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks).
+// You cannot perform on-demand rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
+// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
+// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+// To perform on-demand rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+// invoke the on-demand rotation on the primary key.
+//
+// You cannot initiate on-demand rotation of Amazon Web Services managed KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk).
+// KMS always rotates the key material of Amazon Web Services managed keys every
+// year. Rotation of Amazon Web Services owned KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk)
+// is managed by the Amazon Web Services service that owns the key.
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -6759,20 +7965,28 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// Cross-account use: No. You cannot perform this operation on a KMS key in
// a different Amazon Web Services account.
//
-// Required permissions: kms:ScheduleKeyDeletion (key policy)
+// Required permissions: kms:RotateKeyOnDemand (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy)
//
-// Related operations
+// Related operations:
//
-// - CancelKeyDeletion
+// - EnableKeyRotation
//
-// - DisableKey
+// - DisableKeyRotation
+//
+// - GetKeyRotationStatus
+//
+// - ListKeyRotations
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Key Management Service's
-// API operation ScheduleKeyDeletion for usage and error information.
+// API operation RotateKeyOnDemand for usage and error information.
//
// Returned Error Types:
//
@@ -6780,13 +7994,16 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// The request was rejected because the specified entity or resource could not
// be found.
//
+// - DisabledException
+// The request was rejected because the specified KMS key is not enabled.
+//
// - InvalidArnException
// The request was rejected because a specified ARN, or an ARN in a key policy,
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -6796,13 +8013,201 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
-func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) {
- req, out := c.ScheduleKeyDeletionRequest(input)
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - UnsupportedOperationException
+// The request was rejected because a specified parameter is not supported or
+// a specified resource is not valid for this operation.
+//
+// - LimitExceededException
+// The request was rejected because a quota was exceeded. For more information,
+// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
+// in the Key Management Service Developer Guide.
+//
+// - ConflictException
+// The request was rejected because an automatic rotation of this key is currently
+// in progress or scheduled to begin within the next 20 minutes.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RotateKeyOnDemand
+func (c *KMS) RotateKeyOnDemand(input *RotateKeyOnDemandInput) (*RotateKeyOnDemandOutput, error) {
+ req, out := c.RotateKeyOnDemandRequest(input)
+ return out, req.Send()
+}
+
+// RotateKeyOnDemandWithContext is the same as RotateKeyOnDemand with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RotateKeyOnDemand for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *KMS) RotateKeyOnDemandWithContext(ctx aws.Context, input *RotateKeyOnDemandInput, opts ...request.Option) (*RotateKeyOnDemandOutput, error) {
+ req, out := c.RotateKeyOnDemandRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opScheduleKeyDeletion = "ScheduleKeyDeletion"
+
+// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the
+// client's request for the ScheduleKeyDeletion operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ScheduleKeyDeletion for more information on using the ScheduleKeyDeletion
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the ScheduleKeyDeletionRequest method.
+// req, resp := client.ScheduleKeyDeletionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
+func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) {
+ op := &request.Operation{
+ Name: opScheduleKeyDeletion,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ScheduleKeyDeletionInput{}
+ }
+
+ output = &ScheduleKeyDeletionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ScheduleKeyDeletion API operation for AWS Key Management Service.
+//
+// Schedules the deletion of a KMS key. By default, KMS applies a waiting period
+// of 30 days, but you can specify a waiting period of 7-30 days. When this
+// operation is successful, the key state of the KMS key changes to PendingDeletion
+// and the key can't be used in any cryptographic operations. It remains in
+// this state for the duration of the waiting period. Before the waiting period
+// ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key.
+// After the waiting period ends, KMS deletes the KMS key, its key material,
+// and all KMS data associated with it, including all aliases that refer to
+// it.
+//
+// Deleting a KMS key is a destructive and potentially dangerous operation.
+// When a KMS key is deleted, all data that was encrypted under the KMS key
+// is unrecoverable. (The only exception is a multi-Region replica key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html),
+// or an asymmetric or HMAC KMS key with imported key material (kms/latest/developerguide/importing-keys-managing.html#import-delete-key).)
+// To prevent the use of a KMS key without deleting it, use DisableKey.
+//
+// You can schedule the deletion of a multi-Region primary key and its replica
+// keys at any time. However, KMS will not delete a multi-Region primary key
+// with existing replica keys. If you schedule the deletion of a primary key
+// with replicas, its key state changes to PendingReplicaDeletion and it cannot
+// be replicated or used in cryptographic operations. This status can continue
+// indefinitely. When the last of its replicas keys is deleted (not just scheduled),
+// the key state of the primary key changes to PendingDeletion and its waiting
+// period (PendingWindowInDays) begins. For details, see Deleting multi-Region
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
+// in the Key Management Service Developer Guide.
+//
+// When KMS deletes a KMS key from an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-cmk-keystore.html),
+// it makes a best effort to delete the associated key material from the associated
+// CloudHSM cluster. However, you might need to manually delete the orphaned
+// key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. Deleting a KMS key from an external key
+// store (https://docs.aws.amazon.com/kms/latest/developerguide/delete-xks-key.html)
+// has no effect on the associated external key. However, for both types of
+// custom key stores, deleting a KMS key is destructive and irreversible. You
+// cannot decrypt ciphertext encrypted under the KMS key by using only its associated
+// external key or CloudHSM key. Also, you cannot recreate a KMS key in an external
+// key store by creating a new KMS key with the same key material.
+//
+// For more information about scheduling a KMS key for deletion, see Deleting
+// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
+// in the Key Management Service Developer Guide.
+//
+// The KMS key that you use for this operation must be in a compatible key state.
+// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide.
+//
+// Cross-account use: No. You cannot perform this operation on a KMS key in
+// a different Amazon Web Services account.
+//
+// Required permissions: kms:ScheduleKeyDeletion (key policy)
+//
+// Related operations
+//
+// - CancelKeyDeletion
+//
+// - DisableKey
+//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Key Management Service's
+// API operation ScheduleKeyDeletion for usage and error information.
+//
+// Returned Error Types:
+//
+// - NotFoundException
+// The request was rejected because the specified entity or resource could not
+// be found.
+//
+// - InvalidArnException
+// The request was rejected because a specified ARN, or an ARN in a key policy,
+// is not valid.
+//
+// - DependencyTimeoutException
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
+//
+// - InternalException
+// The request was rejected because an internal exception occurred. The request
+// can be retried.
+//
+// - InvalidStateException
+// The request was rejected because the state of the specified resource is not
+// valid for this request.
+//
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide .
+//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
+func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) {
+ req, out := c.ScheduleKeyDeletionRequest(input)
return out, req.Send()
}
@@ -6919,6 +8324,9 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
//
// Related operations: Verify
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -6940,8 +8348,8 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -6954,7 +8362,8 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -6971,10 +8380,21 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign
func (c *KMS) Sign(input *SignInput) (*SignOutput, error) {
req, out := c.SignRequest(input)
@@ -7044,7 +8464,7 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request,
// Adds or edits tags on a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk).
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
-// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// Each tag consists of a tag key and a tag value, both of which are case-sensitive
@@ -7086,6 +8506,9 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request,
//
// - UntagResource
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7111,10 +8534,18 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - LimitExceededException
// The request was rejected because a quota was exceeded. For more information,
// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
@@ -7193,7 +8624,7 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ
// To delete a tag, specify the tag key and the KMS key.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
-// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// When it succeeds, the UntagResource operation doesn't return any output.
@@ -7226,6 +8657,9 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ
//
// - TagResource
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7251,10 +8685,18 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - TagException
// The request was rejected because one or more tags are not valid.
//
@@ -7330,14 +8772,14 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// account and Region.
//
// Adding, deleting, or updating an alias can allow or deny permission to the
-// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+// KMS key. For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// The current and new KMS key must be the same type (both symmetric or both
-// asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY).
-// This restriction prevents errors in code that uses aliases. If you must assign
-// an alias to a different type of KMS key, use DeleteAlias to delete the old
-// alias and CreateAlias to create a new alias.
+// asymmetric or both HMAC), and they must have the same key usage. This restriction
+// prevents errors in code that uses aliases. If you must assign an alias to
+// a different type of KMS key, use DeleteAlias to delete the old alias and
+// CreateAlias to create a new alias.
//
// You cannot use UpdateAlias to change an alias name. To change an alias name,
// use DeleteAlias to delete the old alias and CreateAlias to create a new alias.
@@ -7376,6 +8818,9 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
//
// - ListAliases
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7386,8 +8831,8 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// Returned Error Types:
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - NotFoundException
// The request was rejected because the specified entity or resource could not
@@ -7406,10 +8851,18 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request,
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias
func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) {
req, out := c.UpdateAliasRequest(input)
@@ -7476,42 +8929,70 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
// UpdateCustomKeyStore API operation for AWS Key Management Service.
//
-// Changes the properties of a custom key store. Use the CustomKeyStoreId parameter
-// to identify the custom key store you want to edit. Use the remaining parameters
-// to change the properties of the custom key store.
-//
-// You can only update a custom key store that is disconnected. To disconnect
-// the custom key store, use DisconnectCustomKeyStore. To reconnect the custom
-// key store after the update completes, use ConnectCustomKeyStore. To find
-// the connection state of a custom key store, use the DescribeCustomKeyStores
-// operation.
-//
-// The CustomKeyStoreId parameter is required in all commands. Use the other
-// parameters of UpdateCustomKeyStore to edit your key store settings.
+// Changes the properties of a custom key store. You can use this operation
+// to change the properties of an CloudHSM key store or an external key store.
//
-// - Use the NewCustomKeyStoreName parameter to change the friendly name
-// of the custom key store to the value that you specify.
+// Use the required CustomKeyStoreId parameter to identify the custom key store.
+// Use the remaining optional parameters to change its properties. This operation
+// does not return any property values. To verify the updated property values,
+// use the DescribeCustomKeyStores operation.
//
-// - Use the KeyStorePassword parameter tell KMS the current password of
-// the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
-// in the associated CloudHSM cluster. You can use this parameter to fix
-// connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password)
-// that occur when KMS cannot log into the associated cluster because the
-// kmsuser password has changed. This value does not change the password
-// in the CloudHSM cluster.
-//
-// - Use the CloudHsmClusterId parameter to associate the custom key store
-// with a different, but related, CloudHSM cluster. You can use this parameter
-// to repair a custom key store if its CloudHSM cluster becomes corrupted
-// or is deleted, or when you need to create or restore a cluster from a
-// backup.
+// This operation is part of the custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// feature in KMS, which combines the convenience and extensive integration
+// of KMS with the isolation and control of a key store that you own and manage.
+//
+// When updating the properties of an external key store, verify that the updated
+// settings connect your key store, via the external key store proxy, to the
+// same external key manager as the previous settings, or to a backup or snapshot
+// of the external key manager with the same cryptographic keys. If the updated
+// connection settings fail, you can fix them and retry, although an extended
+// delay might disrupt Amazon Web Services services. However, if KMS permanently
+// loses its access to cryptographic keys, ciphertext encrypted under those
+// keys is unrecoverable.
+//
+// For external key stores:
+//
+// Some external key managers provide a simpler method for updating an external
+// key store. For details, see your external key manager documentation.
+//
+// When updating an external key store in the KMS console, you can upload a
+// JSON-based proxy configuration file with the desired values. You cannot upload
+// the proxy configuration file to the UpdateCustomKeyStore operation. However,
+// you can use the file to help you determine the correct values for the UpdateCustomKeyStore
+// parameters.
+//
+// For an CloudHSM key store, you can use this operation to change the custom
+// key store friendly name (NewCustomKeyStoreName), to tell KMS about a change
+// to the kmsuser crypto user password (KeyStorePassword), or to associate the
+// custom key store with a different, but related, CloudHSM cluster (CloudHsmClusterId).
+// To update any property of an CloudHSM key store, the ConnectionState of the
+// CloudHSM key store must be DISCONNECTED.
+//
+// For an external key store, you can use this operation to change the custom
+// key store friendly name (NewCustomKeyStoreName), or to tell KMS about a change
+// to the external key store proxy authentication credentials (XksProxyAuthenticationCredential),
+// connection method (XksProxyConnectivity), external proxy endpoint (XksProxyUriEndpoint)
+// and path (XksProxyUriPath). For external key stores with an XksProxyConnectivity
+// of VPC_ENDPOINT_SERVICE, you can also update the Amazon VPC endpoint service
+// name (XksProxyVpcEndpointServiceName). To update most properties of an external
+// key store, the ConnectionState of the external key store must be DISCONNECTED.
+// However, you can update the CustomKeyStoreName, XksProxyAuthenticationCredential,
+// and XksProxyUriPath of an external key store when it is in the CONNECTED
+// or DISCONNECTED state.
+//
+// If your update requires a DISCONNECTED state, before using UpdateCustomKeyStore,
+// use the DisconnectCustomKeyStore operation to disconnect the custom key store.
+// After the UpdateCustomKeyStore operation completes, use the ConnectCustomKeyStore
+// to reconnect the custom key store. To find the ConnectionState of the custom
+// key store, use the DescribeCustomKeyStores operation.
+//
+// Before updating the custom key store, verify that the new values allow KMS
+// to connect the custom key store to its backing key store. For example, before
+// you change the XksProxyUriPath value, verify that the external key store
+// proxy is reachable at the new path.
//
// If the operation succeeds, it returns a JSON object with no properties.
//
-// This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// feature in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
// Cross-account use: No. You cannot perform this operation on a custom key
// store in a different Amazon Web Services account.
//
@@ -7530,6 +9011,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
//
// - DisconnectCustomKeyStore
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7555,15 +9039,16 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
// - CloudHsmClusterNotRelatedException
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
-// to specify an unrelated cluster.
+// to specify an unrelated cluster for an CloudHSM key store.
//
-// Specify a cluster that shares a backup history with the original cluster.
-// This includes clusters that were created from a backup of the current cluster,
-// and clusters that were created from the same backup that produced the current
-// cluster.
+// Specify an CloudHSM cluster that shares a backup history with the original
+// cluster. This includes clusters that were created from a backup of the current
+// cluster, and clusters that were created from the same backup that produced
+// the current cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
//
// - CustomKeyStoreInvalidStateException
@@ -7573,60 +9058,127 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
//
// - CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
//
// - CloudHsmClusterInvalidConfigurationException
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// in the CloudHSM User Guide .
//
+// - XksProxyUriInUseException
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with another external key store
+// in this Amazon Web Services Region. Each external key store in a Region must
+// use a unique external key store proxy API address.
+//
+// - XksProxyUriEndpointInUseException
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with another external key store in this Amazon Web Services Region. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - XksProxyUriUnreachableException
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+//
+// - XksProxyIncorrectAuthenticationCredentialException
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+//
+// - XksProxyVpcEndpointServiceInUseException
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with another external key store in this Amazon Web
+// Services Region. Each external key store in a Region must use a different
+// Amazon VPC endpoint service.
+//
+// - XksProxyVpcEndpointServiceNotFoundException
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+//
+// - XksProxyVpcEndpointServiceInvalidConfigurationException
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store. To identify
+// the cause, see the error message that accompanies the exception and review
+// the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+//
+// - XksProxyInvalidResponseException
+//
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+//
+// - XksProxyInvalidConfigurationException
+// The request was rejected because the external key store proxy is not configured
+// correctly. To identify the cause, see the error message that accompanies
+// the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore
func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) {
req, out := c.UpdateCustomKeyStoreRequest(input)
@@ -7712,6 +9264,9 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req
//
// - DescribeKey
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7730,8 +9285,8 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req
// is not valid.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InternalException
// The request was rejected because an internal exception occurred. The request
@@ -7741,10 +9296,18 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription
func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) {
req, out := c.UpdateKeyDescriptionRequest(input)
@@ -7882,6 +9445,9 @@ func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req *
//
// - ReplicateKey
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -7902,10 +9468,18 @@ func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req *
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - InternalException
// The request was rejected because an internal exception occurred. The request
// can be retried.
@@ -7997,22 +9571,25 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Key Management Service Developer Guide.
//
-// To verify a digital signature, you can use the Verify operation. Specify
-// the same asymmetric KMS key, message, and signing algorithm that were used
-// to produce the signature.
+// To use the Verify operation, specify the same asymmetric KMS key, message,
+// and signing algorithm that were used to produce the signature. The message
+// type does not need to be the same as the one used for signing, but it must
+// indicate whether the value of the Message parameter should be hashed as part
+// of the verification process.
//
// You can also verify the digital signature by using the public key of the
// KMS key outside of KMS. Use the GetPublicKey operation to download the public
// key in the asymmetric KMS key and then use the public key to verify the signature
-// outside of KMS. To verify a signature outside of KMS with an SM2 public key,
-// you must specify the distinguishing ID. By default, KMS uses 1234567812345678
+// outside of KMS. The advantage of using the Verify operation is that it is
+// performed within KMS. As a result, it's easy to call, the operation is performed
+// within the FIPS boundary, it is logged in CloudTrail, and you can use key
+// policy and IAM policy to determine who is authorized to use the KMS key to
+// verify signatures.
+//
+// To verify a signature outside of KMS with an SM2 public key (China Regions
+// only), you must specify the distinguishing ID. By default, KMS uses 1234567812345678
// as the distinguishing ID. For more information, see Offline verification
-// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification)
-// in Key Management Service Developer Guide. The advantage of using the Verify
-// operation is that it is performed within KMS. As a result, it's easy to call,
-// the operation is performed within the FIPS boundary, it is logged in CloudTrail,
-// and you can use key policy and IAM policy to determine who is authorized
-// to use the KMS key to verify signatures.
+// with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
//
// The KMS key that you use for this operation must be in a compatible key state.
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
@@ -8027,6 +9604,9 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
//
// Related operations: Sign
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -8048,8 +9628,8 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// You can retry the request.
//
// - DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
//
// - InvalidKeyUsageException
// The request was rejected for one of the following reasons:
@@ -8062,7 +9642,8 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -8079,15 +9660,26 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
// - KMSInvalidSignatureException
// The request was rejected because the signature verification failed. Signature
// verification fails when it cannot confirm that signature was produced by
// signing the specified message with the specified KMS key and signing algorithm.
//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify
func (c *KMS) Verify(input *VerifyInput) (*VerifyOutput, error) {
req, out := c.VerifyRequest(input)
@@ -8157,10 +9749,12 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes
// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify,
// and compares the computed HMAC to the HMAC that you specify. If the HMACs
-// are identical, the verification succeeds; otherwise, it fails.
+// are identical, the verification succeeds; otherwise, it fails. Verification
+// indicates that the message hasn't changed since the HMAC was calculated,
+// and the specified key was used to generate and verify the HMAC.
//
-// Verification indicates that the message hasn't changed since the HMAC was
-// calculated, and the specified key was used to generate and verify the HMAC.
+// HMAC KMS keys and the HMAC algorithms that KMS uses conform to industry standards
+// defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
//
// This operation is part of KMS support for HMAC KMS keys. For details, see
// HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html)
@@ -8179,6 +9773,9 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
//
// Related operations: GenerateMac
//
+// Eventual consistency: The KMS API follows an eventual consistency model.
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html).
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -8210,7 +9807,8 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -8232,10 +9830,21 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Key Management Service Developer Guide .
//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
+//
+// - DryRunOperationException
+// The request was rejected because the DryRun parameter was specified.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMac
func (c *KMS) VerifyMac(input *VerifyMacInput) (*VerifyMacOutput, error) {
req, out := c.VerifyMacRequest(input)
@@ -8486,12 +10095,13 @@ func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput {
}
// The request was rejected because the specified CloudHSM cluster is already
-// associated with a custom key store or it shares a backup history with a cluster
-// that is associated with a custom key store. Each custom key store must be
-// associated with a different CloudHSM cluster.
+// associated with an CloudHSM key store in the account, or it shares a backup
+// history with an CloudHSM key store in the account. Each CloudHSM key store
+// in the account must be associated with a different CloudHSM cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
type CloudHsmClusterInUseException struct {
_ struct{} `type:"structure"`
@@ -8557,29 +10167,29 @@ func (s *CloudHsmClusterInUseException) RequestID() string {
}
// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
+// meet the configuration requirements for an CloudHSM key store.
//
-// - The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
+// - The CloudHSM cluster must be configured with private subnets in at least
+// two different Availability Zones in the Region.
//
// - The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+// group ID. These rules are set by default when you create the CloudHSM
+// cluster. Do not delete or change them. To get information about a particular
+// security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
-// - The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+// - The CloudHSM cluster must contain at least as many HSMs as the operation
+// requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+// with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -8648,10 +10258,9 @@ func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string {
return s.RespMetadata.RequestID
}
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+// The request was rejected because the CloudHSM cluster associated with the
+// CloudHSM key store is not active. Initialize and activate the cluster and
+// try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
type CloudHsmClusterNotActiveException struct {
_ struct{} `type:"structure"`
@@ -8783,15 +10392,16 @@ func (s *CloudHsmClusterNotFoundException) RequestID() string {
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
-// to specify an unrelated cluster.
+// to specify an unrelated cluster for an CloudHSM key store.
//
-// Specify a cluster that shares a backup history with the original cluster.
-// This includes clusters that were created from a backup of the current cluster,
-// and clusters that were created from the same backup that produced the current
-// cluster.
+// Specify an CloudHSM cluster that shares a backup history with the original
+// cluster. This includes clusters that were created from a backup of the current
+// cluster, and clusters that were created from the same backup that produced
+// the current cluster.
//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+// CloudHSM clusters that share a backup history have the same cluster certificate.
+// To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
type CloudHsmClusterNotRelatedException struct {
_ struct{} `type:"structure"`
@@ -8856,6 +10466,71 @@ func (s *CloudHsmClusterNotRelatedException) RequestID() string {
return s.RespMetadata.RequestID
}
+// The request was rejected because an automatic rotation of this key is currently
+// in progress or scheduled to begin within the next 20 minutes.
+type ConflictException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ConflictException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ConflictException) GoString() string {
+ return s.String()
+}
+
+func newErrorConflictException(v protocol.ResponseMetadata) error {
+ return &ConflictException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ConflictException) Code() string {
+ return "ConflictException"
+}
+
+// Message returns the exception's message.
+func (s *ConflictException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ConflictException) OrigErr() error {
+ return nil
+}
+
+func (s *ConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ConflictException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ConflictException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
type ConnectCustomKeyStoreInput struct {
_ struct{} `type:"structure"`
@@ -8934,6 +10609,9 @@ type CreateAliasInput struct {
// Specifies the alias name. This value must begin with alias/ followed by a
// name, such as alias/ExampleAlias.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// The AliasName value must be string of 1-256 characters. It can contain only
// alphanumeric characters, forward slashes (/), underscores (_), and dashes
// (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is
@@ -9042,18 +10720,36 @@ func (s CreateAliasOutput) GoString() string {
type CreateCustomKeyStoreInput struct {
_ struct{} `type:"structure"`
- // Identifies the CloudHSM cluster for the custom key store. Enter the cluster
- // ID of any active CloudHSM cluster that is not already associated with a custom
- // key store. To find the cluster ID, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // Identifies the CloudHSM cluster for an CloudHSM key store. This parameter
+ // is required for custom key stores with CustomKeyStoreType of AWS_CLOUDHSM.
+ //
+ // Enter the cluster ID of any active CloudHSM cluster that is not already associated
+ // with a custom key store. To find the cluster ID, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
CloudHsmClusterId *string `min:"19" type:"string"`
// Specifies a friendly name for the custom key store. The name must be unique
- // in your Amazon Web Services account.
+ // in your Amazon Web Services account and Region. This parameter is required
+ // for all custom key stores.
+ //
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
//
// CustomKeyStoreName is a required field
CustomKeyStoreName *string `min:"1" type:"string" required:"true"`
+ // Specifies the type of custom key store. The default value is AWS_CLOUDHSM.
+ //
+ // For a custom key store backed by an CloudHSM cluster, omit the parameter
+ // or enter AWS_CLOUDHSM. For a custom key store backed by an external key manager
+ // outside of Amazon Web Services, enter EXTERNAL_KEY_STORE. You cannot change
+ // this property after the key store is created.
+ CustomKeyStoreType *string `type:"string" enum:"CustomKeyStoreType"`
+
+ // Specifies the kmsuser password for an CloudHSM key store. This parameter
+ // is required for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
+ //
// Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
// in the specified CloudHSM cluster. KMS logs into the cluster as this user
// to manage key material on your behalf.
@@ -9068,15 +10764,123 @@ type CreateCustomKeyStoreInput struct {
// String and GoString methods.
KeyStorePassword *string `min:"7" type:"string" sensitive:"true"`
- // Enter the content of the trust anchor certificate for the cluster. This is
- // the content of the customerCA.crt file that you created when you initialized
- // the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html).
+ // Specifies the certificate for an CloudHSM key store. This parameter is required
+ // for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
+ //
+ // Enter the content of the trust anchor certificate for the CloudHSM cluster.
+ // This is the content of the customerCA.crt file that you created when you
+ // initialized the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html).
TrustAnchorCertificate *string `min:"1" type:"string"`
-}
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
+ // Specifies an authentication credential for the external key store proxy (XKS
+ // proxy). This parameter is required for all custom key stores with a CustomKeyStoreType
+ // of EXTERNAL_KEY_STORE.
+ //
+ // The XksProxyAuthenticationCredential has two required elements: RawSecretAccessKey,
+ // a secret key, and AccessKeyId, a unique identifier for the RawSecretAccessKey.
+ // For character requirements, see XksProxyAuthenticationCredentialType (kms/latest/APIReference/API_XksProxyAuthenticationCredentialType.html).
+ //
+ // KMS uses this authentication credential to sign requests to the external
+ // key store proxy on your behalf. This credential is unrelated to Identity
+ // and Access Management (IAM) and Amazon Web Services credentials.
+ //
+ // This parameter doesn't set or change the authentication credentials on the
+ // XKS proxy. It just tells KMS the credential that you established on your
+ // external key store proxy. If you rotate your proxy authentication credential,
+ // use the UpdateCustomKeyStore operation to provide the new credential to KMS.
+ XksProxyAuthenticationCredential *XksProxyAuthenticationCredentialType `type:"structure"`
+
+ // Indicates how KMS communicates with the external key store proxy. This parameter
+ // is required for custom key stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // If the external key store proxy uses a public endpoint, specify PUBLIC_ENDPOINT.
+ // If the external key store proxy uses a Amazon VPC endpoint service for communication
+ // with KMS, specify VPC_ENDPOINT_SERVICE. For help making this choice, see
+ // Choosing a connectivity option (https://docs.aws.amazon.com/kms/latest/developerguide/plan-xks-keystore.html#choose-xks-connectivity)
+ // in the Key Management Service Developer Guide.
+ //
+ // An Amazon VPC endpoint service keeps your communication with KMS in a private
+ // address space entirely within Amazon Web Services, but it requires more configuration,
+ // including establishing a Amazon VPC with multiple subnets, a VPC endpoint
+ // service, a network load balancer, and a verified private DNS name. A public
+ // endpoint is simpler to set up, but it might be slower and might not fulfill
+ // your security requirements. You might consider testing with a public endpoint,
+ // and then establishing a VPC endpoint service for production tasks. Note that
+ // this choice does not determine the location of the external key store proxy.
+ // Even if you choose a VPC endpoint service, the proxy can be hosted within
+ // the VPC or outside of Amazon Web Services such as in your corporate data
+ // center.
+ XksProxyConnectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // Specifies the endpoint that KMS uses to send requests to the external key
+ // store proxy (XKS proxy). This parameter is required for custom key stores
+ // with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // The protocol must be HTTPS. KMS communicates on port 443. Do not specify
+ // the port in the XksProxyUriEndpoint value.
+ //
+ // For external key stores with XksProxyConnectivity value of VPC_ENDPOINT_SERVICE,
+ // specify https:// followed by the private DNS name of the VPC endpoint service.
+ //
+ // For external key stores with PUBLIC_ENDPOINT connectivity, this endpoint
+ // must be reachable before you create the custom key store. KMS connects to
+ // the external key store proxy while creating the custom key store. For external
+ // key stores with VPC_ENDPOINT_SERVICE connectivity, KMS connects when you
+ // call the ConnectCustomKeyStore operation.
+ //
+ // The value of this parameter must begin with https://. The remainder can contain
+ // upper and lower case letters (A-Z and a-z), numbers (0-9), dots (.), and
+ // hyphens (-). Additional slashes (/ and \) are not permitted.
+ //
+ // Uniqueness requirements:
+ //
+ // * The combined XksProxyUriEndpoint and XksProxyUriPath values must be
+ // unique in the Amazon Web Services account and Region.
+ //
+ // * An external key store with PUBLIC_ENDPOINT connectivity cannot use the
+ // same XksProxyUriEndpoint value as an external key store with VPC_ENDPOINT_SERVICE
+ // connectivity in this Amazon Web Services Region.
+ //
+ // * Each external key store with VPC_ENDPOINT_SERVICE connectivity must
+ // have its own private DNS name. The XksProxyUriEndpoint value for external
+ // key stores with VPC_ENDPOINT_SERVICE connectivity (private DNS name) must
+ // be unique in the Amazon Web Services account and Region.
+ XksProxyUriEndpoint *string `min:"10" type:"string"`
+
+ // Specifies the base path to the proxy APIs for this external key store. To
+ // find this value, see the documentation for your external key store proxy.
+ // This parameter is required for all custom key stores with a CustomKeyStoreType
+ // of EXTERNAL_KEY_STORE.
+ //
+ // The value must start with / and must end with /kms/xks/v1 where v1 represents
+ // the version of the KMS external key store proxy API. This path can include
+ // an optional prefix between the required elements such as /prefix/kms/xks/v1.
+ //
+ // Uniqueness requirements:
+ //
+ // * The combined XksProxyUriEndpoint and XksProxyUriPath values must be
+ // unique in the Amazon Web Services account and Region.
+ XksProxyUriPath *string `min:"10" type:"string"`
+
+ // Specifies the name of the Amazon VPC endpoint service for interface endpoints
+ // that is used to communicate with your external key store proxy (XKS proxy).
+ // This parameter is required when the value of CustomKeyStoreType is EXTERNAL_KEY_STORE
+ // and the value of XksProxyConnectivity is VPC_ENDPOINT_SERVICE.
+ //
+ // The Amazon VPC endpoint service must fulfill all requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keystore.html#xks-requirements)
+ // for use with an external key store.
+ //
+ // Uniqueness requirements:
+ //
+ // * External key stores with VPC_ENDPOINT_SERVICE connectivity can share
+ // an Amazon VPC, but each external key store must have its own VPC endpoint
+ // service and private DNS name.
+ XksProxyVpcEndpointServiceName *string `min:"20" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s CreateCustomKeyStoreInput) String() string {
@@ -9110,6 +10914,20 @@ func (s *CreateCustomKeyStoreInput) Validate() error {
if s.TrustAnchorCertificate != nil && len(*s.TrustAnchorCertificate) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TrustAnchorCertificate", 1))
}
+ if s.XksProxyUriEndpoint != nil && len(*s.XksProxyUriEndpoint) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriEndpoint", 10))
+ }
+ if s.XksProxyUriPath != nil && len(*s.XksProxyUriPath) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriPath", 10))
+ }
+ if s.XksProxyVpcEndpointServiceName != nil && len(*s.XksProxyVpcEndpointServiceName) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyVpcEndpointServiceName", 20))
+ }
+ if s.XksProxyAuthenticationCredential != nil {
+ if err := s.XksProxyAuthenticationCredential.Validate(); err != nil {
+ invalidParams.AddNested("XksProxyAuthenticationCredential", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -9129,6 +10947,12 @@ func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreName(v string) *CreateCusto
return s
}
+// SetCustomKeyStoreType sets the CustomKeyStoreType field's value.
+func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreType(v string) *CreateCustomKeyStoreInput {
+ s.CustomKeyStoreType = &v
+ return s
+}
+
// SetKeyStorePassword sets the KeyStorePassword field's value.
func (s *CreateCustomKeyStoreInput) SetKeyStorePassword(v string) *CreateCustomKeyStoreInput {
s.KeyStorePassword = &v
@@ -9141,6 +10965,36 @@ func (s *CreateCustomKeyStoreInput) SetTrustAnchorCertificate(v string) *CreateC
return s
}
+// SetXksProxyAuthenticationCredential sets the XksProxyAuthenticationCredential field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyAuthenticationCredential(v *XksProxyAuthenticationCredentialType) *CreateCustomKeyStoreInput {
+ s.XksProxyAuthenticationCredential = v
+ return s
+}
+
+// SetXksProxyConnectivity sets the XksProxyConnectivity field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyConnectivity(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyConnectivity = &v
+ return s
+}
+
+// SetXksProxyUriEndpoint sets the XksProxyUriEndpoint field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyUriEndpoint(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyUriEndpoint = &v
+ return s
+}
+
+// SetXksProxyUriPath sets the XksProxyUriPath field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyUriPath(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyUriPath = &v
+ return s
+}
+
+// SetXksProxyVpcEndpointServiceName sets the XksProxyVpcEndpointServiceName field's value.
+func (s *CreateCustomKeyStoreInput) SetXksProxyVpcEndpointServiceName(v string) *CreateCustomKeyStoreInput {
+ s.XksProxyVpcEndpointServiceName = &v
+ return s
+}
+
type CreateCustomKeyStoreOutput struct {
_ struct{} `type:"structure"`
@@ -9177,19 +11031,13 @@ type CreateGrantInput struct {
// Specifies a grant constraint.
//
- // KMS supports the EncryptionContextEquals and EncryptionContextSubset grant
- // constraints. Each constraint value can include up to 8 encryption context
- // pairs. The encryption context value in each constraint cannot exceed 384
- // characters. For information about grant constraints, see Using grant constraints
- // (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints)
- // in the Key Management Service Developer Guide. For more information about
- // encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
- // in the Key Management Service Developer Guide .
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
//
- // The encryption context grant constraints allow the permissions in the grant
- // only when the encryption context in the request matches (EncryptionContextEquals)
- // or includes (EncryptionContextSubset) the encryption context specified in
- // this structure.
+ // KMS supports the EncryptionContextEquals and EncryptionContextSubset grant
+ // constraints, which allow the permissions in the grant only when the encryption
+ // context in the request matches (EncryptionContextEquals) or includes (EncryptionContextSubset)
+ // the encryption context specified in the constraint.
//
// The encryption context grant constraints are supported only on grant operations
// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations)
@@ -9201,10 +11049,24 @@ type CreateGrantInput struct {
// permission have an equally strict or stricter encryption context constraint.
//
// You cannot use an encryption context grant constraint for cryptographic operations
- // with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption
- // context.
+ // with asymmetric KMS keys or HMAC KMS keys. Operations with these keys don't
+ // support an encryption context.
+ //
+ // Each constraint value can include up to 8 encryption context pairs. The encryption
+ // context value in each constraint cannot exceed 384 characters. For information
+ // about grant constraints, see Using grant constraints (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints)
+ // in the Key Management Service Developer Guide. For more information about
+ // encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
+ // in the Key Management Service Developer Guide .
Constraints *GrantConstraints `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -9216,13 +11078,11 @@ type CreateGrantInput struct {
// The identity that gets the permissions specified in the grant.
//
- // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, IAM roles, federated
- // users, and assumed role users. For examples of the ARN syntax to use for
- // specifying a principal, see Amazon Web Services Identity and Access Management
- // (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // To specify the grantee principal, use the Amazon Resource Name (ARN) of an
+ // Amazon Web Services principal. Valid principals include Amazon Web Services
+ // accounts, IAM users, IAM roles, federated users, and assumed role users.
+ // For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// GranteePrincipal is a required field
GranteePrincipal *string `min:"1" type:"string" required:"true"`
@@ -9247,6 +11107,9 @@ type CreateGrantInput struct {
// A friendly name for the grant. Use this value to prevent the unintended creation
// of duplicate grants when retrying this request.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// When this value is absent, all CreateGrant requests result in a new grant
// with a unique GrantId even if all the supplied parameters are identical.
// This can result in unintended duplicates when you retry the CreateGrant request.
@@ -9275,12 +11138,10 @@ type CreateGrantInput struct {
// the grant.
//
// To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, federated users,
- // and assumed role users. For examples of the ARN syntax to use for specifying
- // a principal, see Amazon Web Services Identity and Access Management (IAM)
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // of an Amazon Web Services principal. Valid principals include Amazon Web
+ // Services accounts, IAM users, IAM roles, federated users, and assumed role
+ // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// The grant determines the retiring principal. Other principals might have
// permission to retire the grant or revoke the grant. For details, see RevokeGrant
@@ -9344,6 +11205,12 @@ func (s *CreateGrantInput) SetConstraints(v *GrantConstraints) *CreateGrantInput
return s
}
+// SetDryRun sets the DryRun field's value.
+func (s *CreateGrantInput) SetDryRun(v bool) *CreateGrantInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *CreateGrantInput) SetGrantTokens(v []*string) *CreateGrantInput {
s.GrantTokens = v
@@ -9431,54 +11298,49 @@ func (s *CreateGrantOutput) SetGrantToken(v string) *CreateGrantOutput {
type CreateKeyInput struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
//
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide .
- //
- // Use this parameter only when you include a policy in the request and you
- // intend to prevent the principal that is making the request from making a
- // subsequent PutKeyPolicy request on the KMS key.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
- // The default value is false.
+ // Use this parameter only when you intend to prevent the principal that is
+ // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
- // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // and the key material in its associated CloudHSM cluster. To create a KMS
- // key in a custom key store, you must also specify the Origin parameter with
- // a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the
- // custom key store must have at least two active HSMs, each in a different
- // Availability Zone in the Region.
+ // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+ // The ConnectionState of the custom key store must be CONNECTED. To find the
+ // CustomKeyStoreID and ConnectionState use the DescribeCustomKeyStores operation.
//
// This parameter is valid only for symmetric encryption KMS keys in a single
// Region. You cannot create any other type of KMS key in a custom key store.
//
- // To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
- //
- // The response includes the custom key store ID and the ID of the CloudHSM
- // cluster.
- //
- // This operation is part of the custom key store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // feature in KMS, which combines the convenience and extensive integration
- // of KMS with the isolation and control of a single-tenant key store.
+ // When you create a KMS key in an CloudHSM key store, KMS generates a non-exportable
+ // 256-bit symmetric key in its associated CloudHSM cluster and associates it
+ // with the KMS key. When you create a KMS key in an external key store, you
+ // must use the XksKeyId parameter to specify an external key that serves as
+ // key material for the KMS key.
CustomKeyStoreId *string `min:"1" type:"string"`
// Instead, use the KeySpec parameter.
//
// The KeySpec and CustomerMasterKeySpec parameters work the same way. Only
// the names differ. We recommend that you use KeySpec parameter in your code.
- // However, to avoid breaking changes, KMS will support both parameters.
+ // However, to avoid breaking changes, KMS supports both parameters.
//
// Deprecated: This parameter has been deprecated. Instead, use the KeySpec parameter.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
- // A description of the KMS key.
+ // A description of the KMS key. Use a description that helps you decide whether
+ // the KMS key is appropriate for a task. The default value is an empty string
+ // (no description).
//
- // Use a description that helps you decide whether the KMS key is appropriate
- // for a task. The default value is an empty string (no description).
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
//
// To set or change the description after the key is created, use UpdateKeyDescription.
Description *string `type:"string"`
@@ -9491,11 +11353,11 @@ type CreateKeyInput struct {
// in the Key Management Service Developer Guide .
//
// The KeySpec determines whether the KMS key contains a symmetric key or an
- // asymmetric key pair. It also determines the cryptographic algorithms that
- // the KMS key supports. You can't change the KeySpec after the KMS key is created.
- // To further restrict the algorithms that can be used with the KMS key, use
- // a condition key in its key policy or IAM policy. For more information, see
- // kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
+ // asymmetric key pair. It also determines the algorithms that the KMS key supports.
+ // You can't change the KeySpec after the KMS key is created. To further restrict
+ // the algorithms that can be used with the KMS key, use a condition key in
+ // its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
// kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)
// or kms:Signing Algorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
// in the Key Management Service Developer Guide .
@@ -9510,15 +11372,18 @@ type CreateKeyInput struct {
//
// * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512
//
- // * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096
+ // * Asymmetric RSA key pairs (encryption and decryption -or- signing and
+ // verification) RSA_2048 RSA_3072 RSA_4096
//
- // * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1)
- // ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1)
+ // * Asymmetric NIST-recommended elliptic curve key pairs (signing and verification
+ // -or- deriving shared secrets) ECC_NIST_P256 (secp256r1) ECC_NIST_P384
+ // (secp384r1) ECC_NIST_P521 (secp521r1)
//
- // * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1),
- // commonly used for cryptocurrencies.
+ // * Other asymmetric elliptic curve key pairs (signing and verification)
+ // ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.
//
- // * SM2 key pairs (China Regions only) SM2
+ // * SM2 key pairs (encryption and decryption -or- signing and verification
+ // -or- deriving shared secrets) SM2 (China Regions only)
KeySpec *string `type:"string" enum:"KeySpec"`
// Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
@@ -9533,13 +11398,16 @@ type CreateKeyInput struct {
//
// * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.
//
- // * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT
+ // * For asymmetric KMS keys with RSA key pairs, specify ENCRYPT_DECRYPT
// or SIGN_VERIFY.
//
- // * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY.
+ // * For asymmetric KMS keys with NIST-recommended elliptic curve key pairs,
+ // specify SIGN_VERIFY or KEY_AGREEMENT.
//
- // * For asymmetric KMS keys with SM2 key material (China Regions only),
- // specify ENCRYPT_DECRYPT or SIGN_VERIFY.
+ // * For asymmetric KMS keys with ECC_SECG_P256K1 key pairs specify SIGN_VERIFY.
+ //
+ // * For asymmetric KMS keys with SM2 key pairs (China Regions only), specify
+ // ENCRYPT_DECRYPT, SIGN_VERIFY, or KEY_AGREEMENT.
KeyUsage *string `type:"string" enum:"KeyUsageType"`
// Creates a multi-Region primary key that you can replicate into other Amazon
@@ -9561,73 +11429,70 @@ type CreateKeyInput struct {
// This value creates a primary key, not a replica. To create a replica key,
// use the ReplicateKey operation.
//
- // You can create a multi-Region version of a symmetric encryption KMS key,
- // an HMAC KMS key, an asymmetric KMS key, or a KMS key with imported key material.
- // However, you cannot create a multi-Region key in a custom key store.
+ // You can create a symmetric or asymmetric multi-Region key, and you can create
+ // a multi-Region key with imported key material. However, you cannot create
+ // a multi-Region key in a custom key store.
MultiRegion *bool `type:"boolean"`
// The source of the key material for the KMS key. You cannot change the origin
// after you create the KMS key. The default is AWS_KMS, which means that KMS
// creates the key material.
//
- // To create a KMS key with no key material (for imported key material), set
- // the value to EXTERNAL. For more information about importing key material
- // into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
- // in the Key Management Service Developer Guide. This value is valid only for
- // symmetric encryption KMS keys.
+ // To create a KMS key with no key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-create-cmk.html)
+ // (for imported key material), set this value to EXTERNAL. For more information
+ // about importing key material into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
+ // in the Key Management Service Developer Guide. The EXTERNAL origin value
+ // is valid only for symmetric KMS keys.
//
- // To create a KMS key in an KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+ // To create a KMS key in an CloudHSM key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-cmk-keystore.html)
// and create its key material in the associated CloudHSM cluster, set this
// value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to
- // identify the custom key store. This value is valid only for symmetric encryption
- // KMS keys.
+ // identify the CloudHSM key store. The KeySpec value must be SYMMETRIC_DEFAULT.
+ //
+ // To create a KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/create-xks-keys.html),
+ // set this value to EXTERNAL_KEY_STORE. You must also use the CustomKeyStoreId
+ // parameter to identify the external key store and the XksKeyId parameter to
+ // identify the associated external key. The KeySpec value must be SYMMETRIC_DEFAULT.
Origin *string `type:"string" enum:"OriginType"`
- // The key policy to attach to the KMS key. If you do not specify a key policy,
- // KMS attaches a default key policy to the KMS key. For more information, see
- // Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
- // in the Key Management Service Developer Guide.
+ // The key policy to attach to the KMS key.
//
// If you provide a key policy, it must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to True, the key policy
- // must allow the principal that is making the CreateKey request to make
- // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk
- // that the KMS key becomes unmanageable. For more information, refer to
- // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide .
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
// in the Amazon Web Services Identity and Access Management User Guide.
//
- // A key policy document can include only the following characters:
- //
- // * Printable ASCII characters from the space character (\u0020) through
- // the end of the ASCII character range.
- //
- // * Printable characters in the Basic Latin and Latin-1 Supplement character
- // set (through \u00FF).
+ // If you do not provide a key policy, KMS attaches a default key policy to
+ // the KMS key. For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
+ // in the Key Management Service Developer Guide.
//
- // * The tab (\u0009), line feed (\u000A), and carriage return (\u000D) special
- // characters
+ // The key policy size quota is 32 kilobytes (32768 bytes).
//
- // For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
- // in the Key Management Service Developer Guide. For help writing and formatting
- // a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
+ // For help writing and formatting a JSON policy document, see the IAM JSON
+ // Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
// in the Identity and Access Management User Guide .
Policy *string `min:"1" type:"string"`
// Assigns one or more tags to the KMS key. Use this parameter to tag the KMS
// key when it is created. To tag an existing KMS key, use the TagResource operation.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
@@ -9644,6 +11509,33 @@ type CreateKeyInput struct {
// Tags can also be used to control access to a KMS key. For details, see Tagging
// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html).
Tags []*Tag `type:"list"`
+
+ // Identifies the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+ // that serves as key material for the KMS key in an external key store (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html).
+ // Specify the ID that the external key store proxy (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-xks-proxy)
+ // uses to refer to the external key. For help, see the documentation for your
+ // external key store proxy.
+ //
+ // This parameter is required for a KMS key with an Origin value of EXTERNAL_KEY_STORE.
+ // It is not valid for KMS keys with any other Origin value.
+ //
+ // The external key must be an existing 256-bit AES symmetric encryption key
+ // hosted outside of Amazon Web Services in an external key manager associated
+ // with the external key store specified by the CustomKeyStoreId parameter.
+ // This key must be enabled and configured to perform encryption and decryption.
+ // Each KMS key in an external key store must use a different external key.
+ // For details, see Requirements for a KMS key in an external key store (https://docs.aws.amazon.com/create-xks-keys.html#xks-key-requirements)
+ // in the Key Management Service Developer Guide.
+ //
+ // Each KMS key in an external key store is associated two backing keys. One
+ // is key material that KMS generates. The other is the external key specified
+ // by this parameter. When you use the KMS key in an external key store to encrypt
+ // data, the encryption operation is performed first by KMS using the KMS key
+ // material, and then by the external key manager using the specified external
+ // key, a process known as double encryption. For details, see Double encryption
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-double-encryption)
+ // in the Key Management Service Developer Guide.
+ XksKeyId *string `min:"1" type:"string"`
}
// String returns the string representation.
@@ -9673,6 +11565,9 @@ func (s *CreateKeyInput) Validate() error {
if s.Policy != nil && len(*s.Policy) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
}
+ if s.XksKeyId != nil && len(*s.XksKeyId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("XksKeyId", 1))
+ }
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@@ -9750,6 +11645,12 @@ func (s *CreateKeyInput) SetTags(v []*Tag) *CreateKeyInput {
return s
}
+// SetXksKeyId sets the XksKeyId field's value.
+func (s *CreateKeyInput) SetXksKeyId(v string) *CreateKeyInput {
+ s.XksKeyId = &v
+ return s
+}
+
type CreateKeyOutput struct {
_ struct{} `type:"structure"`
@@ -9854,17 +11755,27 @@ func (s *CustomKeyStoreHasCMKsException) RequestID() string {
//
// This exception is thrown under the following conditions:
//
-// - You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
+// - You requested the ConnectCustomKeyStore operation on a custom key store
+// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+// for all other ConnectionState values. To reconnect a custom key store
+// in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+// it (ConnectCustomKeyStore).
+//
+// - You requested the CreateKey operation in a custom key store that is
+// not connected. This operations is valid only when the custom key store
+// ConnectionState is CONNECTED.
+//
+// - You requested the DisconnectCustomKeyStore operation on a custom key
+// store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+// is valid for all other ConnectionState values.
//
// - You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
-// - You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
+// - You requested the GenerateRandom operation in an CloudHSM key store
+// that is not connected. This operation is valid only when the CloudHSM
+// key store ConnectionState is CONNECTED.
type CustomKeyStoreInvalidStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -10064,39 +11975,53 @@ func (s *CustomKeyStoreNotFoundException) RequestID() string {
type CustomKeyStoresListEntry struct {
_ struct{} `type:"structure"`
- // A unique identifier for the CloudHSM cluster that is associated with the
- // custom key store.
+ // A unique identifier for the CloudHSM cluster that is associated with an CloudHSM
+ // key store. This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.
CloudHsmClusterId *string `min:"19" type:"string"`
// Describes the connection error. This field appears in the response only when
- // the ConnectionState is FAILED. For help resolving these errors, see How to
- // Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
- // in Key Management Service Developer Guide.
- //
- // Valid values are:
+ // the ConnectionState is FAILED.
//
- // * CLUSTER_NOT_FOUND - KMS cannot find the CloudHSM cluster with the specified
- // cluster ID.
+ // Many failures can be resolved by updating the properties of the custom key
+ // store. To update a custom key store, disconnect it (DisconnectCustomKeyStore),
+ // correct the errors (UpdateCustomKeyStore), and try to connect again (ConnectCustomKeyStore).
+ // For additional help resolving these errors, see How to Fix a Connection Failure
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+ // in Key Management Service Developer Guide.
//
- // * INSUFFICIENT_CLOUDHSM_HSMS - The associated CloudHSM cluster does not
- // contain any active HSMs. To connect a custom key store to its CloudHSM
- // cluster, the cluster must contain at least one active HSM.
+ // All custom key stores:
//
- // * INTERNAL_ERROR - KMS could not complete the request due to an internal
+ // * INTERNAL_ERROR — KMS could not complete the request due to an internal
// error. Retry the request. For ConnectCustomKeyStore requests, disconnect
// the custom key store before trying to connect again.
//
- // * INVALID_CREDENTIALS - KMS does not have the correct password for the
- // kmsuser crypto user in the CloudHSM cluster. Before you can connect your
- // custom key store to its CloudHSM cluster, you must change the kmsuser
- // account password and update the key store password value for the custom
- // key store.
+ // * NETWORK_ERRORS — Network errors are preventing KMS from connecting
+ // the custom key store to its backing key store.
//
- // * NETWORK_ERRORS - Network errors are preventing KMS from connecting to
- // the custom key store.
+ // CloudHSM key stores:
+ //
+ // * CLUSTER_NOT_FOUND — KMS cannot find the CloudHSM cluster with the
+ // specified cluster ID.
+ //
+ // * INSUFFICIENT_CLOUDHSM_HSMS — The associated CloudHSM cluster does
+ // not contain any active HSMs. To connect a custom key store to its CloudHSM
+ // cluster, the cluster must contain at least one active HSM.
//
- // * SUBNET_NOT_FOUND - A subnet in the CloudHSM cluster configuration was
- // deleted. If KMS cannot find all of the subnets in the cluster configuration,
+ // * INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET — At least one private subnet
+ // associated with the CloudHSM cluster doesn't have any available IP addresses.
+ // A CloudHSM key store connection requires one free IP address in each of
+ // the associated private subnets, although two are preferable. For details,
+ // see How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+ // in the Key Management Service Developer Guide.
+ //
+ // * INVALID_CREDENTIALS — The KeyStorePassword for the custom key store
+ // doesn't match the current password of the kmsuser crypto user in the CloudHSM
+ // cluster. Before you can connect your custom key store to its CloudHSM
+ // cluster, you must change the kmsuser account password and update the KeyStorePassword
+ // value for the custom key store.
+ //
+ // * SUBNET_NOT_FOUND — A subnet in the CloudHSM cluster configuration
+ // was deleted. If KMS cannot find all of the subnets in the cluster configuration,
// attempts to connect the custom key store to the CloudHSM cluster fail.
// To fix this error, create a cluster from a recent backup and associate
// it with your custom key store. (This process creates a new cluster configuration
@@ -10104,13 +12029,13 @@ type CustomKeyStoresListEntry struct {
// Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
// in the Key Management Service Developer Guide.
//
- // * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated
+ // * USER_LOCKED_OUT — The kmsuser CU account is locked out of the associated
// CloudHSM cluster due to too many failed password attempts. Before you
// can connect your custom key store to its CloudHSM cluster, you must change
// the kmsuser account password and update the key store password value for
// the custom key store.
//
- // * USER_LOGGED_IN - The kmsuser CU account is logged into the the associated
+ // * USER_LOGGED_IN — The kmsuser CU account is logged into the associated
// CloudHSM cluster. This prevents KMS from rotating the kmsuser account
// password and logging into the cluster. Before you can connect your custom
// key store to its CloudHSM cluster, you must log the kmsuser CU out of
@@ -10119,27 +12044,94 @@ type CustomKeyStoresListEntry struct {
// store. For help, see How to Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
// in the Key Management Service Developer Guide.
//
- // * USER_NOT_FOUND - KMS cannot find a kmsuser CU account in the associated
+ // * USER_NOT_FOUND — KMS cannot find a kmsuser CU account in the associated
// CloudHSM cluster. Before you can connect your custom key store to its
// CloudHSM cluster, you must create a kmsuser CU account in the cluster,
// and then update the key store password value for the custom key store.
+ //
+ // External key stores:
+ //
+ // * INVALID_CREDENTIALS — One or both of the XksProxyAuthenticationCredential
+ // values is not valid on the specified external key store proxy.
+ //
+ // * XKS_PROXY_ACCESS_DENIED — KMS requests are denied access to the external
+ // key store proxy. If the external key store proxy has authorization rules,
+ // verify that they permit KMS to communicate with the proxy on your behalf.
+ //
+ // * XKS_PROXY_INVALID_CONFIGURATION — A configuration error is preventing
+ // the external key store from connecting to its proxy. Verify the value
+ // of the XksProxyUriPath.
+ //
+ // * XKS_PROXY_INVALID_RESPONSE — KMS cannot interpret the response from
+ // the external key store proxy. If you see this connection error code repeatedly,
+ // notify your external key store proxy vendor.
+ //
+ // * XKS_PROXY_INVALID_TLS_CONFIGURATION — KMS cannot connect to the external
+ // key store proxy because the TLS configuration is invalid. Verify that
+ // the XKS proxy supports TLS 1.2 or 1.3. Also, verify that the TLS certificate
+ // is not expired, and that it matches the hostname in the XksProxyUriEndpoint
+ // value, and that it is signed by a certificate authority included in the
+ // Trusted Certificate Authorities (https://github.com/aws/aws-kms-xksproxy-api-spec/blob/main/TrustedCertificateAuthorities)
+ // list.
+ //
+ // * XKS_PROXY_NOT_REACHABLE — KMS can't communicate with your external
+ // key store proxy. Verify that the XksProxyUriEndpoint and XksProxyUriPath
+ // are correct. Use the tools for your external key store proxy to verify
+ // that the proxy is active and available on its network. Also, verify that
+ // your external key manager instances are operating properly. Connection
+ // attempts fail with this connection error code if the proxy reports that
+ // all external key manager instances are unavailable.
+ //
+ // * XKS_PROXY_TIMED_OUT — KMS can connect to the external key store proxy,
+ // but the proxy does not respond to KMS in the time allotted. If you see
+ // this connection error code repeatedly, notify your external key store
+ // proxy vendor.
+ //
+ // * XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION — The Amazon VPC endpoint
+ // service configuration doesn't conform to the requirements for an KMS external
+ // key store. The VPC endpoint service must be an endpoint service for interface
+ // endpoints in the caller's Amazon Web Services account. It must have a
+ // network load balancer (NLB) connected to at least two subnets, each in
+ // a different Availability Zone. The Allow principals list must include
+ // the KMS service principal for the Region, cks.kms..amazonaws.com,
+ // such as cks.kms.us-east-1.amazonaws.com. It must not require acceptance
+ // (https://docs.aws.amazon.com/vpc/latest/privatelink/create-endpoint-service.html)
+ // of connection requests. It must have a private DNS name. The private DNS
+ // name for an external key store with VPC_ENDPOINT_SERVICE connectivity
+ // must be unique in its Amazon Web Services Region. The domain of the private
+ // DNS name must have a verification status (https://docs.aws.amazon.com/vpc/latest/privatelink/verify-domains.html)
+ // of verified. The TLS certificate (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html)
+ // specifies the private DNS hostname at which the endpoint is reachable.
+ //
+ // * XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND — KMS can't find the VPC endpoint
+ // service that it uses to communicate with the external key store proxy.
+ // Verify that the XksProxyVpcEndpointServiceName is correct and the KMS
+ // service principal has service consumer permissions on the Amazon VPC endpoint
+ // service.
ConnectionErrorCode *string `type:"string" enum:"ConnectionErrorCodeType"`
- // Indicates whether the custom key store is connected to its CloudHSM cluster.
+ // Indicates whether the custom key store is connected to its backing key store.
+ // For an CloudHSM key store, the ConnectionState indicates whether it is connected
+ // to its CloudHSM cluster. For an external key store, the ConnectionState indicates
+ // whether it is connected to the external key store proxy that communicates
+ // with your external key manager.
//
- // You can create and use KMS keys in your custom key stores only when its connection
- // state is CONNECTED.
+ // You can create and use KMS keys in your custom key stores only when its ConnectionState
+ // is CONNECTED.
//
- // The value is DISCONNECTED if the key store has never been connected or you
- // use the DisconnectCustomKeyStore operation to disconnect it. If the value
- // is CONNECTED but you are having trouble using the custom key store, make
- // sure that its associated CloudHSM cluster is active and contains at least
- // one active HSM.
+ // The ConnectionState value is DISCONNECTED only if the key store has never
+ // been connected or you use the DisconnectCustomKeyStore operation to disconnect
+ // it. If the value is CONNECTED but you are having trouble using the custom
+ // key store, make sure that the backing key store is reachable and active.
+ // For an CloudHSM key store, verify that its associated CloudHSM cluster is
+ // active and contains at least one active HSM. For an external key store, verify
+ // that the external key store proxy and external key manager are connected
+ // and enabled.
//
// A value of FAILED indicates that an attempt to connect was unsuccessful.
// The ConnectionErrorCode field in the response indicates the cause of the
- // failure. For help resolving a connection failure, see Troubleshooting a Custom
- // Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+ // failure. For help resolving a connection failure, see Troubleshooting a custom
+ // key store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
// in the Key Management Service Developer Guide.
ConnectionState *string `type:"string" enum:"ConnectionStateType"`
@@ -10152,10 +12144,26 @@ type CustomKeyStoresListEntry struct {
// The user-specified friendly name for the custom key store.
CustomKeyStoreName *string `min:"1" type:"string"`
- // The trust anchor certificate of the associated CloudHSM cluster. When you
- // initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+ // Indicates the type of the custom key store. AWS_CLOUDHSM indicates a custom
+ // key store backed by an CloudHSM cluster. EXTERNAL_KEY_STORE indicates a custom
+ // key store backed by an external key store proxy and external key manager
+ // outside of Amazon Web Services.
+ CustomKeyStoreType *string `type:"string" enum:"CustomKeyStoreType"`
+
+ // The trust anchor certificate of the CloudHSM cluster associated with an CloudHSM
+ // key store. When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create this certificate and save it in the customerCA.crt file.
+ //
+ // This field appears only when the CustomKeyStoreType is AWS_CLOUDHSM.
TrustAnchorCertificate *string `min:"1" type:"string"`
+
+ // Configuration settings for the external key store proxy (XKS proxy). The
+ // external key store proxy translates KMS requests into a format that your
+ // external key manager can understand. The proxy configuration includes connection
+ // information that KMS requires.
+ //
+ // This field appears only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.
+ XksProxyConfiguration *XksProxyConfigurationType `type:"structure"`
}
// String returns the string representation.
@@ -10212,12 +12220,24 @@ func (s *CustomKeyStoresListEntry) SetCustomKeyStoreName(v string) *CustomKeySto
return s
}
+// SetCustomKeyStoreType sets the CustomKeyStoreType field's value.
+func (s *CustomKeyStoresListEntry) SetCustomKeyStoreType(v string) *CustomKeyStoresListEntry {
+ s.CustomKeyStoreType = &v
+ return s
+}
+
// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value.
func (s *CustomKeyStoresListEntry) SetTrustAnchorCertificate(v string) *CustomKeyStoresListEntry {
s.TrustAnchorCertificate = &v
return s
}
+// SetXksProxyConfiguration sets the XksProxyConfiguration field's value.
+func (s *CustomKeyStoresListEntry) SetXksProxyConfiguration(v *XksProxyConfigurationType) *CustomKeyStoresListEntry {
+ s.XksProxyConfiguration = v
+ return s
+}
+
type DecryptInput struct {
_ struct{} `type:"structure"`
@@ -10227,6 +12247,13 @@ type DecryptInput struct {
// CiphertextBlob is a required field
CiphertextBlob []byte `min:"1" type:"blob" required:"true"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption algorithm that will be used to decrypt the ciphertext.
// Specify the same algorithm that was used to encrypt the data. If you specify
// a different algorithm, the Decrypt operation fails.
@@ -10291,6 +12318,27 @@ type DecryptInput struct {
// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
// To get the alias name and alias ARN, use ListAliases.
KeyId *string `min:"1" type:"string"`
+
+ // A signed attestation document (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-concepts.html#term-attestdoc)
+ // from an Amazon Web Services Nitro enclave and the encryption algorithm to
+ // use with the enclave's public key. The only valid encryption algorithm is
+ // RSAES_OAEP_SHA_256.
+ //
+ // This parameter only supports attestation documents for Amazon Web Services
+ // Nitro Enclaves. To include this parameter, use the Amazon Web Services Nitro
+ // Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+ // or any Amazon Web Services SDK.
+ //
+ // When you use this parameter, instead of returning the plaintext data, KMS
+ // encrypts the plaintext data with the public key in the attestation document,
+ // and returns the resulting ciphertext in the CiphertextForRecipient field
+ // in the response. This ciphertext can be decrypted only with the private key
+ // in the enclave. The Plaintext field in the response is null or empty.
+ //
+ // For information about the interaction between KMS and Amazon Web Services
+ // Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ Recipient *RecipientInfo `type:"structure"`
}
// String returns the string representation.
@@ -10323,6 +12371,11 @@ func (s *DecryptInput) Validate() error {
if s.KeyId != nil && len(*s.KeyId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
}
+ if s.Recipient != nil {
+ if err := s.Recipient.Validate(); err != nil {
+ invalidParams.AddNested("Recipient", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -10336,6 +12389,12 @@ func (s *DecryptInput) SetCiphertextBlob(v []byte) *DecryptInput {
return s
}
+// SetDryRun sets the DryRun field's value.
+func (s *DecryptInput) SetDryRun(v bool) *DecryptInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value.
func (s *DecryptInput) SetEncryptionAlgorithm(v string) *DecryptInput {
s.EncryptionAlgorithm = &v
@@ -10360,9 +12419,26 @@ func (s *DecryptInput) SetKeyId(v string) *DecryptInput {
return s
}
+// SetRecipient sets the Recipient field's value.
+func (s *DecryptInput) SetRecipient(v *RecipientInfo) *DecryptInput {
+ s.Recipient = v
+ return s
+}
+
type DecryptOutput struct {
_ struct{} `type:"structure"`
+ // The plaintext data encrypted with the public key in the attestation document.
+ //
+ // This field is included in the response only when the Recipient parameter
+ // in the request includes a valid attestation document from an Amazon Web Services
+ // Nitro enclave. For information about the interaction between KMS and Amazon
+ // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses
+ // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ // CiphertextForRecipient is automatically base64 encoded/decoded by the SDK.
+ CiphertextForRecipient []byte `min:"1" type:"blob"`
+
// The encryption algorithm that was used to decrypt the ciphertext.
EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
@@ -10373,6 +12449,9 @@ type DecryptOutput struct {
// Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services
// CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
//
+ // If the response includes the CiphertextForRecipient field, the Plaintext
+ // field is null or empty.
+ //
// Plaintext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by DecryptOutput's
// String and GoString methods.
@@ -10399,6 +12478,12 @@ func (s DecryptOutput) GoString() string {
return s.String()
}
+// SetCiphertextForRecipient sets the CiphertextForRecipient field's value.
+func (s *DecryptOutput) SetCiphertextForRecipient(v []byte) *DecryptOutput {
+ s.CiphertextForRecipient = v
+ return s
+}
+
// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value.
func (s *DecryptOutput) SetEncryptionAlgorithm(v string) *DecryptOutput {
s.EncryptionAlgorithm = &v
@@ -10643,8 +12728,8 @@ func (s DeleteImportedKeyMaterialOutput) GoString() string {
return s.String()
}
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
+// The system timed out while trying to fulfill the request. You can retry the
+// request.
type DependencyTimeoutException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -10708,36 +12793,103 @@ func (s *DependencyTimeoutException) RequestID() string {
return s.RespMetadata.RequestID
}
-type DescribeCustomKeyStoresInput struct {
+type DeriveSharedSecretInput struct {
_ struct{} `type:"structure"`
- // Gets only information about the specified custom key store. Enter the key
- // store ID.
+ // Checks if your request will succeed. DryRun is an optional parameter.
//
- // By default, this operation gets information about all custom key stores in
- // the account and Region. To limit the output to a particular custom key store,
- // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter,
- // but not both.
- CustomKeyStoreId *string `min:"1" type:"string"`
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
- // Gets only information about the specified custom key store. Enter the friendly
- // name of the custom key store.
+ // A list of grant tokens.
//
- // By default, this operation gets information about all custom key stores in
- // the account and Region. To limit the output to a particular custom key store,
- // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter,
- // but not both.
- CustomKeyStoreName *string `min:"1" type:"string"`
+ // Use a grant token when your permission to call this operation comes from
+ // a new grant that has not yet achieved eventual consistency. For more information,
+ // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+ // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+ // in the Key Management Service Developer Guide.
+ GrantTokens []*string `type:"list"`
- // Use this parameter to specify the maximum number of items to return. When
- // this value is present, KMS does not return more than the specified number
- // of items, but it might return fewer.
- Limit *int64 `min:"1" type:"integer"`
+ // Specifies the key agreement algorithm used to derive the shared secret. The
+ // only valid value is ECDH.
+ //
+ // KeyAgreementAlgorithm is a required field
+ KeyAgreementAlgorithm *string `type:"string" required:"true" enum:"KeyAgreementAlgorithmSpec"`
- // Use this parameter in a subsequent request after you receive a response with
- // truncated results. Set it to the value of NextMarker from the truncated response
- // you just received.
- Marker *string `min:"1" type:"string"`
+ // Identifies an asymmetric NIST-recommended ECC or SM2 (China Regions only)
+ // KMS key. KMS uses the private key in the specified key pair to derive the
+ // shared secret. The key usage of the KMS key must be KEY_AGREEMENT. To find
+ // the KeyUsage of a KMS key, use the DescribeKey operation.
+ //
+ // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN.
+ // When using an alias name, prefix it with "alias/". To specify a KMS key in
+ // a different Amazon Web Services account, you must use the key ARN or alias
+ // ARN.
+ //
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Alias name: alias/ExampleAlias
+ //
+ // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+ //
+ // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+ // To get the alias name and alias ARN, use ListAliases.
+ //
+ // KeyId is a required field
+ KeyId *string `min:"1" type:"string" required:"true"`
+
+ // Specifies the public key in your peer's NIST-recommended elliptic curve (ECC)
+ // or SM2 (China Regions only) key pair.
+ //
+ // The public key must be a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo
+ // (SPKI), as defined in RFC 5280 (https://tools.ietf.org/html/rfc5280).
+ //
+ // GetPublicKey returns the public key of an asymmetric KMS key pair in the
+ // required DER-encoded format.
+ //
+ // If you use Amazon Web Services CLI version 1 (https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-welcome.html),
+ // you must provide the DER-encoded X.509 public key in a file. Otherwise, the
+ // Amazon Web Services CLI Base64-encodes the public key a second time, resulting
+ // in a ValidationException.
+ //
+ // You can specify the public key as binary data in a file using fileb (fileb://)
+ // or in-line using a Base64 encoded string.
+ // PublicKey is automatically base64 encoded/decoded by the SDK.
+ //
+ // PublicKey is a required field
+ PublicKey []byte `min:"1" type:"blob" required:"true"`
+
+ // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc)
+ // from an Amazon Web Services Nitro enclave and the encryption algorithm to
+ // use with the enclave's public key. The only valid encryption algorithm is
+ // RSAES_OAEP_SHA_256.
+ //
+ // This parameter only supports attestation documents for Amazon Web Services
+ // Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro
+ // Enclaves, use the Amazon Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+ // to generate the attestation document and then use the Recipient parameter
+ // from any Amazon Web Services SDK to provide the attestation document for
+ // the enclave.
+ //
+ // When you use this parameter, instead of returning a plaintext copy of the
+ // shared secret, KMS encrypts the plaintext shared secret under the public
+ // key in the attestation document, and returns the resulting ciphertext in
+ // the CiphertextForRecipient field in the response. This ciphertext can be
+ // decrypted only with the private key in the enclave. The CiphertextBlob field
+ // in the response contains the encrypted shared secret derived from the KMS
+ // key specified by the KeyId parameter and public key specified by the PublicKey
+ // parameter. The SharedSecret field in the response is null or empty.
+ //
+ // For information about the interaction between KMS and Amazon Web Services
+ // Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ Recipient *RecipientInfo `type:"structure"`
}
// String returns the string representation.
@@ -10745,7 +12897,7 @@ type DescribeCustomKeyStoresInput struct {
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) String() string {
+func (s DeriveSharedSecretInput) String() string {
return awsutil.Prettify(s)
}
@@ -10754,24 +12906,32 @@ func (s DescribeCustomKeyStoresInput) String() string {
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) GoString() string {
+func (s DeriveSharedSecretInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeCustomKeyStoresInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeCustomKeyStoresInput"}
- if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1))
+func (s *DeriveSharedSecretInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeriveSharedSecretInput"}
+ if s.KeyAgreementAlgorithm == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyAgreementAlgorithm"))
}
- if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1))
+ if s.KeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyId"))
}
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ if s.KeyId != nil && len(*s.KeyId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
}
- if s.Marker != nil && len(*s.Marker) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ if s.PublicKey == nil {
+ invalidParams.Add(request.NewErrParamRequired("PublicKey"))
+ }
+ if s.PublicKey != nil && len(s.PublicKey) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PublicKey", 1))
+ }
+ if s.Recipient != nil {
+ if err := s.Recipient.Validate(); err != nil {
+ invalidParams.AddNested("Recipient", err.(request.ErrInvalidParams))
+ }
}
if invalidParams.Len() > 0 {
@@ -10780,34 +12940,235 @@ func (s *DescribeCustomKeyStoresInput) Validate() error {
return nil
}
-// SetCustomKeyStoreId sets the CustomKeyStoreId field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreId(v string) *DescribeCustomKeyStoresInput {
- s.CustomKeyStoreId = &v
+// SetDryRun sets the DryRun field's value.
+func (s *DeriveSharedSecretInput) SetDryRun(v bool) *DeriveSharedSecretInput {
+ s.DryRun = &v
return s
}
-// SetCustomKeyStoreName sets the CustomKeyStoreName field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreName(v string) *DescribeCustomKeyStoresInput {
- s.CustomKeyStoreName = &v
+// SetGrantTokens sets the GrantTokens field's value.
+func (s *DeriveSharedSecretInput) SetGrantTokens(v []*string) *DeriveSharedSecretInput {
+ s.GrantTokens = v
return s
}
-// SetLimit sets the Limit field's value.
-func (s *DescribeCustomKeyStoresInput) SetLimit(v int64) *DescribeCustomKeyStoresInput {
- s.Limit = &v
+// SetKeyAgreementAlgorithm sets the KeyAgreementAlgorithm field's value.
+func (s *DeriveSharedSecretInput) SetKeyAgreementAlgorithm(v string) *DeriveSharedSecretInput {
+ s.KeyAgreementAlgorithm = &v
return s
}
-// SetMarker sets the Marker field's value.
-func (s *DescribeCustomKeyStoresInput) SetMarker(v string) *DescribeCustomKeyStoresInput {
- s.Marker = &v
+// SetKeyId sets the KeyId field's value.
+func (s *DeriveSharedSecretInput) SetKeyId(v string) *DeriveSharedSecretInput {
+ s.KeyId = &v
return s
}
-type DescribeCustomKeyStoresOutput struct {
- _ struct{} `type:"structure"`
+// SetPublicKey sets the PublicKey field's value.
+func (s *DeriveSharedSecretInput) SetPublicKey(v []byte) *DeriveSharedSecretInput {
+ s.PublicKey = v
+ return s
+}
- // Contains metadata about each custom key store.
+// SetRecipient sets the Recipient field's value.
+func (s *DeriveSharedSecretInput) SetRecipient(v *RecipientInfo) *DeriveSharedSecretInput {
+ s.Recipient = v
+ return s
+}
+
+type DeriveSharedSecretOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The plaintext shared secret encrypted with the public key in the attestation
+ // document.
+ //
+ // This field is included in the response only when the Recipient parameter
+ // in the request includes a valid attestation document from an Amazon Web Services
+ // Nitro enclave. For information about the interaction between KMS and Amazon
+ // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses
+ // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ // CiphertextForRecipient is automatically base64 encoded/decoded by the SDK.
+ CiphertextForRecipient []byte `min:"1" type:"blob"`
+
+ // Identifies the key agreement algorithm used to derive the shared secret.
+ KeyAgreementAlgorithm *string `type:"string" enum:"KeyAgreementAlgorithmSpec"`
+
+ // Identifies the KMS key used to derive the shared secret.
+ KeyId *string `min:"1" type:"string"`
+
+ // The source of the key material for the specified KMS key.
+ //
+ // When this value is AWS_KMS, KMS created the key material. When this value
+ // is EXTERNAL, the key material was imported or the KMS key doesn't have any
+ // key material.
+ //
+ // The only valid values for DeriveSharedSecret are AWS_KMS and EXTERNAL. DeriveSharedSecret
+ // does not support KMS keys with a KeyOrigin value of AWS_CLOUDHSM or EXTERNAL_KEY_STORE.
+ KeyOrigin *string `type:"string" enum:"OriginType"`
+
+ // The raw secret derived from the specified key agreement algorithm, private
+ // key in the asymmetric KMS key, and your peer's public key.
+ //
+ // If the response includes the CiphertextForRecipient field, the SharedSecret
+ // field is null or empty.
+ //
+ // SharedSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by DeriveSharedSecretOutput's
+ // String and GoString methods.
+ //
+ // SharedSecret is automatically base64 encoded/decoded by the SDK.
+ SharedSecret []byte `min:"1" type:"blob" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeriveSharedSecretOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeriveSharedSecretOutput) GoString() string {
+ return s.String()
+}
+
+// SetCiphertextForRecipient sets the CiphertextForRecipient field's value.
+func (s *DeriveSharedSecretOutput) SetCiphertextForRecipient(v []byte) *DeriveSharedSecretOutput {
+ s.CiphertextForRecipient = v
+ return s
+}
+
+// SetKeyAgreementAlgorithm sets the KeyAgreementAlgorithm field's value.
+func (s *DeriveSharedSecretOutput) SetKeyAgreementAlgorithm(v string) *DeriveSharedSecretOutput {
+ s.KeyAgreementAlgorithm = &v
+ return s
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *DeriveSharedSecretOutput) SetKeyId(v string) *DeriveSharedSecretOutput {
+ s.KeyId = &v
+ return s
+}
+
+// SetKeyOrigin sets the KeyOrigin field's value.
+func (s *DeriveSharedSecretOutput) SetKeyOrigin(v string) *DeriveSharedSecretOutput {
+ s.KeyOrigin = &v
+ return s
+}
+
+// SetSharedSecret sets the SharedSecret field's value.
+func (s *DeriveSharedSecretOutput) SetSharedSecret(v []byte) *DeriveSharedSecretOutput {
+ s.SharedSecret = v
+ return s
+}
+
+type DescribeCustomKeyStoresInput struct {
+ _ struct{} `type:"structure"`
+
+ // Gets only information about the specified custom key store. Enter the key
+ // store ID.
+ //
+ // By default, this operation gets information about all custom key stores in
+ // the account and Region. To limit the output to a particular custom key store,
+ // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but
+ // not both.
+ CustomKeyStoreId *string `min:"1" type:"string"`
+
+ // Gets only information about the specified custom key store. Enter the friendly
+ // name of the custom key store.
+ //
+ // By default, this operation gets information about all custom key stores in
+ // the account and Region. To limit the output to a particular custom key store,
+ // provide either the CustomKeyStoreId or CustomKeyStoreName parameter, but
+ // not both.
+ CustomKeyStoreName *string `min:"1" type:"string"`
+
+ // Use this parameter to specify the maximum number of items to return. When
+ // this value is present, KMS does not return more than the specified number
+ // of items, but it might return fewer.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Use this parameter in a subsequent request after you receive a response with
+ // truncated results. Set it to the value of NextMarker from the truncated response
+ // you just received.
+ Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeCustomKeyStoresInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeCustomKeyStoresInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeCustomKeyStoresInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeCustomKeyStoresInput"}
+ if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1))
+ }
+ if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCustomKeyStoreId sets the CustomKeyStoreId field's value.
+func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreId(v string) *DescribeCustomKeyStoresInput {
+ s.CustomKeyStoreId = &v
+ return s
+}
+
+// SetCustomKeyStoreName sets the CustomKeyStoreName field's value.
+func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreName(v string) *DescribeCustomKeyStoresInput {
+ s.CustomKeyStoreName = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *DescribeCustomKeyStoresInput) SetLimit(v int64) *DescribeCustomKeyStoresInput {
+ s.Limit = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *DescribeCustomKeyStoresInput) SetMarker(v string) *DescribeCustomKeyStoresInput {
+ s.Marker = &v
+ return s
+}
+
+type DescribeCustomKeyStoresOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains metadata about each custom key store.
CustomKeyStores []*CustomKeyStoresListEntry `type:"list"`
// When Truncated is true, this element is present and contains the value to
@@ -10816,7 +13177,7 @@ type DescribeCustomKeyStoresOutput struct {
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -11277,6 +13638,70 @@ func (s DisconnectCustomKeyStoreOutput) GoString() string {
return s.String()
}
+// The request was rejected because the DryRun parameter was specified.
+type DryRunOperationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DryRunOperationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DryRunOperationException) GoString() string {
+ return s.String()
+}
+
+func newErrorDryRunOperationException(v protocol.ResponseMetadata) error {
+ return &DryRunOperationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *DryRunOperationException) Code() string {
+ return "DryRunOperationException"
+}
+
+// Message returns the exception's message.
+func (s *DryRunOperationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *DryRunOperationException) OrigErr() error {
+ return nil
+}
+
+func (s *DryRunOperationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *DryRunOperationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *DryRunOperationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
type EnableKeyInput struct {
_ struct{} `type:"structure"`
@@ -11361,13 +13786,13 @@ func (s EnableKeyOutput) GoString() string {
type EnableKeyRotationInput struct {
_ struct{} `type:"structure"`
- // Identifies a symmetric encryption KMS key. You cannot enable or disable automatic
- // rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+ // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation
+ // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
- // The key rotation status of these KMS keys is always false. To enable or disable
- // automatic rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+ // To enable or disable automatic rotation of a set of related multi-Region
+ // keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
// set the property on the primary key.
//
// Specify the key ID or key ARN of the KMS key.
@@ -11382,6 +13807,18 @@ type EnableKeyRotationInput struct {
//
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
+
+ // Use this parameter to specify a custom period of time between each rotation
+ // date. If no value is specified, the default value is 365 days.
+ //
+ // The rotation period defines the number of days after you enable automatic
+ // key rotation that KMS will rotate your key material, and the number of days
+ // between each automatic rotation thereafter.
+ //
+ // You can use the kms:RotationPeriodInDays (https://docs.aws.amazon.com/kms/latest/developerguide/conditions-kms.html#conditions-kms-rotation-period-in-days)
+ // condition key to further constrain the values that principals can specify
+ // in the RotationPeriodInDays parameter.
+ RotationPeriodInDays *int64 `min:"90" type:"integer"`
}
// String returns the string representation.
@@ -11411,6 +13848,9 @@ func (s *EnableKeyRotationInput) Validate() error {
if s.KeyId != nil && len(*s.KeyId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
}
+ if s.RotationPeriodInDays != nil && *s.RotationPeriodInDays < 90 {
+ invalidParams.Add(request.NewErrParamMinValue("RotationPeriodInDays", 90))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -11424,6 +13864,12 @@ func (s *EnableKeyRotationInput) SetKeyId(v string) *EnableKeyRotationInput {
return s
}
+// SetRotationPeriodInDays sets the RotationPeriodInDays field's value.
+func (s *EnableKeyRotationInput) SetRotationPeriodInDays(v int64) *EnableKeyRotationInput {
+ s.RotationPeriodInDays = &v
+ return s
+}
+
type EnableKeyRotationOutput struct {
_ struct{} `type:"structure"`
}
@@ -11449,12 +13895,21 @@ func (s EnableKeyRotationOutput) GoString() string {
type EncryptInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption algorithm that KMS will use to encrypt the plaintext
// message. The algorithm must be compatible with the KMS key that you specify.
//
// This parameter is required only for asymmetric KMS keys. The default value,
// SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys.
// If you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256.
+ //
+ // The SM2PKE algorithm is only available in China Regions.
EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
// Specifies the encryption context that will be used to encrypt the data. An
@@ -11462,6 +13917,9 @@ type EncryptInput struct {
// with a symmetric encryption KMS key. The standard asymmetric encryption algorithms
// and HMAC algorithms that KMS uses do not support an encryption context.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// An encryption context is a collection of non-secret key-value pairs that
// represent additional authenticated data. When you use an encryption context
// to encrypt data, you must specify the same (an exact case-sensitive match)
@@ -11560,6 +14018,12 @@ func (s *EncryptInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *EncryptInput) SetDryRun(v bool) *EncryptInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value.
func (s *EncryptInput) SetEncryptionAlgorithm(v string) *EncryptInput {
s.EncryptionAlgorithm = &v
@@ -11711,9 +14175,19 @@ func (s *ExpiredImportTokenException) RequestID() string {
type GenerateDataKeyInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption context that will be used when encrypting the data
// key.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// An encryption context is a collection of non-secret key-value pairs that
// represent additional authenticated data. When you use an encryption context
// to encrypt data, you must specify the same (an exact case-sensitive match)
@@ -11774,6 +14248,29 @@ type GenerateDataKeyInput struct {
// You must specify either the KeySpec or the NumberOfBytes parameter (but not
// both) in every GenerateDataKey request.
NumberOfBytes *int64 `min:"1" type:"integer"`
+
+ // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc)
+ // from an Amazon Web Services Nitro enclave and the encryption algorithm to
+ // use with the enclave's public key. The only valid encryption algorithm is
+ // RSAES_OAEP_SHA_256.
+ //
+ // This parameter only supports attestation documents for Amazon Web Services
+ // Nitro Enclaves. To include this parameter, use the Amazon Web Services Nitro
+ // Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+ // or any Amazon Web Services SDK.
+ //
+ // When you use this parameter, instead of returning the plaintext data key,
+ // KMS encrypts the plaintext data key under the public key in the attestation
+ // document, and returns the resulting ciphertext in the CiphertextForRecipient
+ // field in the response. This ciphertext can be decrypted only with the private
+ // key in the enclave. The CiphertextBlob field in the response contains a copy
+ // of the data key encrypted under the KMS key specified by the KeyId parameter.
+ // The Plaintext field in the response is null or empty.
+ //
+ // For information about the interaction between KMS and Amazon Web Services
+ // Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ Recipient *RecipientInfo `type:"structure"`
}
// String returns the string representation.
@@ -11806,6 +14303,11 @@ func (s *GenerateDataKeyInput) Validate() error {
if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 {
invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1))
}
+ if s.Recipient != nil {
+ if err := s.Recipient.Validate(); err != nil {
+ invalidParams.AddNested("Recipient", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -11813,6 +14315,12 @@ func (s *GenerateDataKeyInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *GenerateDataKeyInput) SetDryRun(v bool) *GenerateDataKeyInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionContext sets the EncryptionContext field's value.
func (s *GenerateDataKeyInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyInput {
s.EncryptionContext = v
@@ -11843,6 +14351,12 @@ func (s *GenerateDataKeyInput) SetNumberOfBytes(v int64) *GenerateDataKeyInput {
return s
}
+// SetRecipient sets the Recipient field's value.
+func (s *GenerateDataKeyInput) SetRecipient(v *RecipientInfo) *GenerateDataKeyInput {
+ s.Recipient = v
+ return s
+}
+
type GenerateDataKeyOutput struct {
_ struct{} `type:"structure"`
@@ -11851,6 +14365,19 @@ type GenerateDataKeyOutput struct {
// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
CiphertextBlob []byte `min:"1" type:"blob"`
+ // The plaintext data key encrypted with the public key from the Nitro enclave.
+ // This ciphertext can be decrypted only by using a private key in the Nitro
+ // enclave.
+ //
+ // This field is included in the response only when the Recipient parameter
+ // in the request includes a valid attestation document from an Amazon Web Services
+ // Nitro enclave. For information about the interaction between KMS and Amazon
+ // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses
+ // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ // CiphertextForRecipient is automatically base64 encoded/decoded by the SDK.
+ CiphertextForRecipient []byte `min:"1" type:"blob"`
+
// The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
// of the KMS key that encrypted the data key.
KeyId *string `min:"1" type:"string"`
@@ -11860,6 +14387,9 @@ type GenerateDataKeyOutput struct {
// this data key to encrypt your data outside of KMS. Then, remove it from memory
// as soon as possible.
//
+ // If the response includes the CiphertextForRecipient field, the Plaintext
+ // field is null or empty.
+ //
// Plaintext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GenerateDataKeyOutput's
// String and GoString methods.
@@ -11892,6 +14422,12 @@ func (s *GenerateDataKeyOutput) SetCiphertextBlob(v []byte) *GenerateDataKeyOutp
return s
}
+// SetCiphertextForRecipient sets the CiphertextForRecipient field's value.
+func (s *GenerateDataKeyOutput) SetCiphertextForRecipient(v []byte) *GenerateDataKeyOutput {
+ s.CiphertextForRecipient = v
+ return s
+}
+
// SetKeyId sets the KeyId field's value.
func (s *GenerateDataKeyOutput) SetKeyId(v string) *GenerateDataKeyOutput {
s.KeyId = &v
@@ -11907,9 +14443,19 @@ func (s *GenerateDataKeyOutput) SetPlaintext(v []byte) *GenerateDataKeyOutput {
type GenerateDataKeyPairInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption context that will be used when encrypting the private
// key in the data key pair.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// An encryption context is a collection of non-secret key-value pairs that
// represent additional authenticated data. When you use an encryption context
// to encrypt data, you must specify the same (an exact case-sensitive match)
@@ -11963,11 +14509,36 @@ type GenerateDataKeyPairInput struct {
// encrypt and decrypt or to sign and verify (but not both), and the rule that
// permits you to use ECC KMS keys only to sign and verify, are not effective
// on data key pairs, which are used outside of KMS. The SM2 key spec is only
- // available in China Regions. RSA and ECC asymmetric key pairs are also available
- // in China Regions.
+ // available in China Regions.
//
// KeyPairSpec is a required field
KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"`
+
+ // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc)
+ // from an Amazon Web Services Nitro enclave and the encryption algorithm to
+ // use with the enclave's public key. The only valid encryption algorithm is
+ // RSAES_OAEP_SHA_256.
+ //
+ // This parameter only supports attestation documents for Amazon Web Services
+ // Nitro Enclaves. To call DeriveSharedSecret for an Amazon Web Services Nitro
+ // Enclaves, use the Amazon Web Services Nitro Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+ // to generate the attestation document and then use the Recipient parameter
+ // from any Amazon Web Services SDK to provide the attestation document for
+ // the enclave.
+ //
+ // When you use this parameter, instead of returning a plaintext copy of the
+ // private data key, KMS encrypts the plaintext private data key under the public
+ // key in the attestation document, and returns the resulting ciphertext in
+ // the CiphertextForRecipient field in the response. This ciphertext can be
+ // decrypted only with the private key in the enclave. The CiphertextBlob field
+ // in the response contains a copy of the private data key encrypted under the
+ // KMS key specified by the KeyId parameter. The PrivateKeyPlaintext field in
+ // the response is null or empty.
+ //
+ // For information about the interaction between KMS and Amazon Web Services
+ // Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ Recipient *RecipientInfo `type:"structure"`
}
// String returns the string representation.
@@ -12000,6 +14571,11 @@ func (s *GenerateDataKeyPairInput) Validate() error {
if s.KeyPairSpec == nil {
invalidParams.Add(request.NewErrParamRequired("KeyPairSpec"))
}
+ if s.Recipient != nil {
+ if err := s.Recipient.Validate(); err != nil {
+ invalidParams.AddNested("Recipient", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -12007,6 +14583,12 @@ func (s *GenerateDataKeyPairInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *GenerateDataKeyPairInput) SetDryRun(v bool) *GenerateDataKeyPairInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionContext sets the EncryptionContext field's value.
func (s *GenerateDataKeyPairInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairInput {
s.EncryptionContext = v
@@ -12031,9 +14613,28 @@ func (s *GenerateDataKeyPairInput) SetKeyPairSpec(v string) *GenerateDataKeyPair
return s
}
+// SetRecipient sets the Recipient field's value.
+func (s *GenerateDataKeyPairInput) SetRecipient(v *RecipientInfo) *GenerateDataKeyPairInput {
+ s.Recipient = v
+ return s
+}
+
type GenerateDataKeyPairOutput struct {
_ struct{} `type:"structure"`
+ // The plaintext private data key encrypted with the public key from the Nitro
+ // enclave. This ciphertext can be decrypted only by using a private key in
+ // the Nitro enclave.
+ //
+ // This field is included in the response only when the Recipient parameter
+ // in the request includes a valid attestation document from an Amazon Web Services
+ // Nitro enclave. For information about the interaction between KMS and Amazon
+ // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses
+ // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ // CiphertextForRecipient is automatically base64 encoded/decoded by the SDK.
+ CiphertextForRecipient []byte `min:"1" type:"blob"`
+
// The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
// of the KMS key that encrypted the private key.
KeyId *string `min:"1" type:"string"`
@@ -12049,6 +14650,9 @@ type GenerateDataKeyPairOutput struct {
// The plaintext copy of the private key. When you use the HTTP API or the Amazon
// Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
//
+ // If the response includes the CiphertextForRecipient field, the PrivateKeyPlaintext
+ // field is null or empty.
+ //
// PrivateKeyPlaintext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GenerateDataKeyPairOutput's
// String and GoString methods.
@@ -12080,7 +14684,13 @@ func (s GenerateDataKeyPairOutput) GoString() string {
return s.String()
}
-// SetKeyId sets the KeyId field's value.
+// SetCiphertextForRecipient sets the CiphertextForRecipient field's value.
+func (s *GenerateDataKeyPairOutput) SetCiphertextForRecipient(v []byte) *GenerateDataKeyPairOutput {
+ s.CiphertextForRecipient = v
+ return s
+}
+
+// SetKeyId sets the KeyId field's value.
func (s *GenerateDataKeyPairOutput) SetKeyId(v string) *GenerateDataKeyPairOutput {
s.KeyId = &v
return s
@@ -12113,9 +14723,19 @@ func (s *GenerateDataKeyPairOutput) SetPublicKey(v []byte) *GenerateDataKeyPairO
type GenerateDataKeyPairWithoutPlaintextInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption context that will be used when encrypting the private
// key in the data key pair.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// An encryption context is a collection of non-secret key-value pairs that
// represent additional authenticated data. When you use an encryption context
// to encrypt data, you must specify the same (an exact case-sensitive match)
@@ -12169,8 +14789,7 @@ type GenerateDataKeyPairWithoutPlaintextInput struct {
// encrypt and decrypt or to sign and verify (but not both), and the rule that
// permits you to use ECC KMS keys only to sign and verify, are not effective
// on data key pairs, which are used outside of KMS. The SM2 key spec is only
- // available in China Regions. RSA and ECC asymmetric key pairs are also available
- // in China Regions.
+ // available in China Regions.
//
// KeyPairSpec is a required field
KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"`
@@ -12213,6 +14832,12 @@ func (s *GenerateDataKeyPairWithoutPlaintextInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *GenerateDataKeyPairWithoutPlaintextInput) SetDryRun(v bool) *GenerateDataKeyPairWithoutPlaintextInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionContext sets the EncryptionContext field's value.
func (s *GenerateDataKeyPairWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairWithoutPlaintextInput {
s.EncryptionContext = v
@@ -12303,9 +14928,19 @@ func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetPublicKey(v []byte) *Gene
type GenerateDataKeyWithoutPlaintextInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Specifies the encryption context that will be used when encrypting the data
// key.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// An encryption context is a collection of non-secret key-value pairs that
// represent additional authenticated data. When you use an encryption context
// to encrypt data, you must specify the same (an exact case-sensitive match)
@@ -12400,6 +15035,12 @@ func (s *GenerateDataKeyWithoutPlaintextInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *GenerateDataKeyWithoutPlaintextInput) SetDryRun(v bool) *GenerateDataKeyWithoutPlaintextInput {
+ s.DryRun = &v
+ return s
+}
+
// SetEncryptionContext sets the EncryptionContext field's value.
func (s *GenerateDataKeyWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyWithoutPlaintextInput {
s.EncryptionContext = v
@@ -12476,6 +15117,13 @@ func (s *GenerateDataKeyWithoutPlaintextOutput) SetKeyId(v string) *GenerateData
type GenerateMacInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -12562,6 +15210,12 @@ func (s *GenerateMacInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *GenerateMacInput) SetDryRun(v bool) *GenerateMacInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *GenerateMacInput) SetGrantTokens(v []*string) *GenerateMacInput {
s.GrantTokens = v
@@ -12592,8 +15246,10 @@ type GenerateMacOutput struct {
// The HMAC KMS key used in the operation.
KeyId *string `min:"1" type:"string"`
- // The hash-based message authentication code (HMAC) for the given message,
- // key, and MAC algorithm.
+ // The hash-based message authentication code (HMAC) that was generated for
+ // the specified message, HMAC KMS key, and MAC algorithm.
+ //
+ // This is the standard, raw HMAC defined in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104).
// Mac is automatically base64 encoded/decoded by the SDK.
Mac []byte `min:"1" type:"blob"`
@@ -12641,12 +15297,36 @@ type GenerateRandomInput struct {
_ struct{} `type:"structure"`
// Generates the random byte string in the CloudHSM cluster that is associated
- // with the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
- // To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
+ // with the specified CloudHSM key store. To find the ID of a custom key store,
+ // use the DescribeCustomKeyStores operation.
+ //
+ // External key store IDs are not valid for this parameter. If you specify the
+ // ID of an external key store, GenerateRandom throws an UnsupportedOperationException.
CustomKeyStoreId *string `min:"1" type:"string"`
// The length of the random byte string. This parameter is required.
NumberOfBytes *int64 `min:"1" type:"integer"`
+
+ // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc)
+ // from an Amazon Web Services Nitro enclave and the encryption algorithm to
+ // use with the enclave's public key. The only valid encryption algorithm is
+ // RSAES_OAEP_SHA_256.
+ //
+ // This parameter only supports attestation documents for Amazon Web Services
+ // Nitro Enclaves. To include this parameter, use the Amazon Web Services Nitro
+ // Enclaves SDK (https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk)
+ // or any Amazon Web Services SDK.
+ //
+ // When you use this parameter, instead of returning plaintext bytes, KMS encrypts
+ // the plaintext bytes under the public key in the attestation document, and
+ // returns the resulting ciphertext in the CiphertextForRecipient field in the
+ // response. This ciphertext can be decrypted only with the private key in the
+ // enclave. The Plaintext field in the response is null or empty.
+ //
+ // For information about the interaction between KMS and Amazon Web Services
+ // Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ Recipient *RecipientInfo `type:"structure"`
}
// String returns the string representation.
@@ -12676,6 +15356,11 @@ func (s *GenerateRandomInput) Validate() error {
if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 {
invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1))
}
+ if s.Recipient != nil {
+ if err := s.Recipient.Validate(); err != nil {
+ invalidParams.AddNested("Recipient", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -12695,12 +15380,34 @@ func (s *GenerateRandomInput) SetNumberOfBytes(v int64) *GenerateRandomInput {
return s
}
+// SetRecipient sets the Recipient field's value.
+func (s *GenerateRandomInput) SetRecipient(v *RecipientInfo) *GenerateRandomInput {
+ s.Recipient = v
+ return s
+}
+
type GenerateRandomOutput struct {
_ struct{} `type:"structure"`
+ // The plaintext random bytes encrypted with the public key from the Nitro enclave.
+ // This ciphertext can be decrypted only by using a private key in the Nitro
+ // enclave.
+ //
+ // This field is included in the response only when the Recipient parameter
+ // in the request includes a valid attestation document from an Amazon Web Services
+ // Nitro enclave. For information about the interaction between KMS and Amazon
+ // Web Services Nitro Enclaves, see How Amazon Web Services Nitro Enclaves uses
+ // KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+ // in the Key Management Service Developer Guide.
+ // CiphertextForRecipient is automatically base64 encoded/decoded by the SDK.
+ CiphertextForRecipient []byte `min:"1" type:"blob"`
+
// The random byte string. When you use the HTTP API or the Amazon Web Services
// CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
//
+ // If the response includes the CiphertextForRecipient field, the Plaintext
+ // field is null or empty.
+ //
// Plaintext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GenerateRandomOutput's
// String and GoString methods.
@@ -12727,6 +15434,12 @@ func (s GenerateRandomOutput) GoString() string {
return s.String()
}
+// SetCiphertextForRecipient sets the CiphertextForRecipient field's value.
+func (s *GenerateRandomOutput) SetCiphertextForRecipient(v []byte) *GenerateRandomOutput {
+ s.CiphertextForRecipient = v
+ return s
+}
+
// SetPlaintext sets the Plaintext field's value.
func (s *GenerateRandomOutput) SetPlaintext(v []byte) *GenerateRandomOutput {
s.Plaintext = v
@@ -12751,11 +15464,10 @@ type GetKeyPolicyInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // Specifies the name of the key policy. The only valid name is default. To
- // get the names of key policies, use ListKeyPolicies.
- //
- // PolicyName is a required field
- PolicyName *string `min:"1" type:"string" required:"true"`
+ // Specifies the name of the key policy. If no policy name is specified, the
+ // default value is default. The only valid name is default. To get the names
+ // of key policies, use ListKeyPolicies.
+ PolicyName *string `min:"1" type:"string"`
}
// String returns the string representation.
@@ -12785,9 +15497,6 @@ func (s *GetKeyPolicyInput) Validate() error {
if s.KeyId != nil && len(*s.KeyId) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
}
- if s.PolicyName == nil {
- invalidParams.Add(request.NewErrParamRequired("PolicyName"))
- }
if s.PolicyName != nil && len(*s.PolicyName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
}
@@ -12815,6 +15524,9 @@ type GetKeyPolicyOutput struct {
// A key policy document in JSON format.
Policy *string `min:"1" type:"string"`
+
+ // The name of the key policy. The only valid value is default.
+ PolicyName *string `min:"1" type:"string"`
}
// String returns the string representation.
@@ -12841,6 +15553,12 @@ func (s *GetKeyPolicyOutput) SetPolicy(v string) *GetKeyPolicyOutput {
return s
}
+// SetPolicyName sets the PolicyName field's value.
+func (s *GetKeyPolicyOutput) SetPolicyName(v string) *GetKeyPolicyOutput {
+ s.PolicyName = &v
+ return s
+}
+
type GetKeyRotationStatusInput struct {
_ struct{} `type:"structure"`
@@ -12904,8 +15622,27 @@ func (s *GetKeyRotationStatusInput) SetKeyId(v string) *GetKeyRotationStatusInpu
type GetKeyRotationStatusOutput struct {
_ struct{} `type:"structure"`
+ // Identifies the specified symmetric encryption KMS key.
+ KeyId *string `min:"1" type:"string"`
+
// A Boolean value that specifies whether key rotation is enabled.
KeyRotationEnabled *bool `type:"boolean"`
+
+ // The next date that KMS will automatically rotate the key material.
+ NextRotationDate *time.Time `type:"timestamp"`
+
+ // Identifies the date and time that an in progress on-demand rotation was initiated.
+ //
+ // The KMS API follows an eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html)
+ // model due to the distributed nature of the system. As a result, there might
+ // be a slight delay between initiating on-demand key rotation and the rotation's
+ // completion. Once the on-demand rotation is complete, use ListKeyRotations
+ // to view the details of the on-demand rotation.
+ OnDemandRotationStartDate *time.Time `type:"timestamp"`
+
+ // The number of days between each automatic rotation. The default value is
+ // 365 days.
+ RotationPeriodInDays *int64 `min:"90" type:"integer"`
}
// String returns the string representation.
@@ -12926,17 +15663,44 @@ func (s GetKeyRotationStatusOutput) GoString() string {
return s.String()
}
+// SetKeyId sets the KeyId field's value.
+func (s *GetKeyRotationStatusOutput) SetKeyId(v string) *GetKeyRotationStatusOutput {
+ s.KeyId = &v
+ return s
+}
+
// SetKeyRotationEnabled sets the KeyRotationEnabled field's value.
func (s *GetKeyRotationStatusOutput) SetKeyRotationEnabled(v bool) *GetKeyRotationStatusOutput {
s.KeyRotationEnabled = &v
return s
}
+// SetNextRotationDate sets the NextRotationDate field's value.
+func (s *GetKeyRotationStatusOutput) SetNextRotationDate(v time.Time) *GetKeyRotationStatusOutput {
+ s.NextRotationDate = &v
+ return s
+}
+
+// SetOnDemandRotationStartDate sets the OnDemandRotationStartDate field's value.
+func (s *GetKeyRotationStatusOutput) SetOnDemandRotationStartDate(v time.Time) *GetKeyRotationStatusOutput {
+ s.OnDemandRotationStartDate = &v
+ return s
+}
+
+// SetRotationPeriodInDays sets the RotationPeriodInDays field's value.
+func (s *GetKeyRotationStatusOutput) SetRotationPeriodInDays(v int64) *GetKeyRotationStatusOutput {
+ s.RotationPeriodInDays = &v
+ return s
+}
+
type GetParametersForImportInput struct {
_ struct{} `type:"structure"`
- // The identifier of the symmetric encryption KMS key into which you will import
- // key material. The Origin of the KMS key must be EXTERNAL.
+ // The identifier of the KMS key that will be associated with the imported key
+ // material. The Origin of the KMS key must be EXTERNAL.
+ //
+ // All KMS key types are supported, including multi-Region keys. However, you
+ // cannot import key material into a KMS key in a custom key store.
//
// Specify the key ID or key ARN of the KMS key.
//
@@ -12951,16 +15715,50 @@ type GetParametersForImportInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // The algorithm you will use to encrypt the key material before importing it
- // with ImportKeyMaterial. For more information, see Encrypt the Key Material
- // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html)
+ // The algorithm you will use with the RSA public key (PublicKey) in the response
+ // to protect your key material during import. For more information, see Select
+ // a wrapping algorithm (kms/latest/developerguide/importing-keys-get-public-key-and-token.html#select-wrapping-algorithm)
// in the Key Management Service Developer Guide.
//
+ // For RSA_AES wrapping algorithms, you encrypt your key material with an AES
+ // key that you generate, then encrypt your AES key with the RSA public key
+ // from KMS. For RSAES wrapping algorithms, you encrypt your key material directly
+ // with the RSA public key from KMS.
+ //
+ // The wrapping algorithms that you can use depend on the type of key material
+ // that you are importing. To import an RSA private key, you must use an RSA_AES
+ // wrapping algorithm.
+ //
+ // * RSA_AES_KEY_WRAP_SHA_256 — Supported for wrapping RSA and ECC key
+ // material.
+ //
+ // * RSA_AES_KEY_WRAP_SHA_1 — Supported for wrapping RSA and ECC key material.
+ //
+ // * RSAES_OAEP_SHA_256 — Supported for all types of key material, except
+ // RSA key material (private key). You cannot use the RSAES_OAEP_SHA_256
+ // wrapping algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521
+ // key material.
+ //
+ // * RSAES_OAEP_SHA_1 — Supported for all types of key material, except
+ // RSA key material (private key). You cannot use the RSAES_OAEP_SHA_1 wrapping
+ // algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key
+ // material.
+ //
+ // * RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not
+ // support the RSAES_PKCS1_V1_5 wrapping algorithm.
+ //
// WrappingAlgorithm is a required field
WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"`
- // The type of wrapping key (public key) to return in the response. Only 2048-bit
- // RSA public keys are supported.
+ // The type of RSA public key to return in the response. You will use this wrapping
+ // key with the specified wrapping algorithm to protect your key material during
+ // import.
+ //
+ // Use the longest RSA wrapping key that is practical.
+ //
+ // You cannot use an RSA_2048 public key to directly wrap an ECC_NIST_P521 private
+ // key. Instead, use an RSA_AES wrapping algorithm or choose a longer RSA public
+ // key.
//
// WrappingKeySpec is a required field
WrappingKeySpec *string `type:"string" required:"true" enum:"WrappingKeySpec"`
@@ -13183,7 +15981,7 @@ type GetPublicKeyOutput struct {
//
// The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend
// that you use the KeySpec field in your code. However, to avoid breaking changes,
- // KMS will support both fields.
+ // KMS supports both fields.
//
// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
@@ -13197,6 +15995,10 @@ type GetPublicKeyOutput struct {
// is ENCRYPT_DECRYPT.
EncryptionAlgorithms []*string `type:"list" enum:"EncryptionAlgorithmSpec"`
+ // The key agreement algorithm used to derive a shared secret. This field is
+ // present only when the KMS key has a KeyUsage value of KEY_AGREEMENT.
+ KeyAgreementAlgorithms []*string `type:"list" enum:"KeyAgreementAlgorithmSpec"`
+
// The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
// of the asymmetric KMS key from which the public key was downloaded.
KeyId *string `min:"1" type:"string"`
@@ -13204,11 +16006,11 @@ type GetPublicKeyOutput struct {
// The type of the of the public key that was downloaded.
KeySpec *string `type:"string" enum:"KeySpec"`
- // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or
- // SIGN_VERIFY.
+ // The permitted use of the public key. Valid values for asymmetric key pairs
+ // are ENCRYPT_DECRYPT, SIGN_VERIFY, and KEY_AGREEMENT.
//
- // This information is critical. If a public key with SIGN_VERIFY key usage
- // encrypts data outside of KMS, the ciphertext cannot be decrypted.
+ // This information is critical. For example, if a public key with SIGN_VERIFY
+ // key usage encrypts data outside of KMS, the ciphertext cannot be decrypted.
KeyUsage *string `type:"string" enum:"KeyUsageType"`
// The exported public key.
@@ -13257,6 +16059,12 @@ func (s *GetPublicKeyOutput) SetEncryptionAlgorithms(v []*string) *GetPublicKeyO
return s
}
+// SetKeyAgreementAlgorithms sets the KeyAgreementAlgorithms field's value.
+func (s *GetPublicKeyOutput) SetKeyAgreementAlgorithms(v []*string) *GetPublicKeyOutput {
+ s.KeyAgreementAlgorithms = v
+ return s
+}
+
// SetKeyId sets the KeyId field's value.
func (s *GetPublicKeyOutput) SetKeyId(v string) *GetPublicKeyOutput {
s.KeyId = &v
@@ -13293,10 +16101,10 @@ func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutp
//
// KMS applies the grant constraints only to cryptographic operations that support
// an encryption context, that is, all cryptographic operations with a symmetric
-// encryption KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks).
+// KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks).
// Grant constraints are not applied to operations that do not support an encryption
-// context, such as cryptographic operations with HMAC KMS keys or asymmetric
-// KMS keys, and management operations, such as DescribeKey or RetireGrant.
+// context, such as cryptographic operations with asymmetric KMS keys and management
+// operations, such as DescribeKey or RetireGrant.
//
// In a cryptographic operation, the encryption context in the decryption operation
// must be an exact, case-sensitive match for the keys and values in the encryption
@@ -13473,7 +16281,7 @@ type ImportKeyMaterialInput struct {
_ struct{} `type:"structure"`
// The encrypted key material to import. The key material must be encrypted
- // with the public wrapping key that GetParametersForImport returned, using
+ // under the public wrapping key that GetParametersForImport returned, using
// the wrapping algorithm that you specified in the same GetParametersForImport
// request.
// EncryptedKeyMaterial is automatically base64 encoded/decoded by the SDK.
@@ -13481,9 +16289,17 @@ type ImportKeyMaterialInput struct {
// EncryptedKeyMaterial is a required field
EncryptedKeyMaterial []byte `min:"1" type:"blob" required:"true"`
- // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES,
- // in which case you must include the ValidTo parameter. When this parameter
- // is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.
+ // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES.
+ // For help with this choice, see Setting an expiration time (https://docs.aws.amazon.com/en_us/kms/latest/developerguide/importing-keys.html#importing-keys-expiration)
+ // in the Key Management Service Developer Guide.
+ //
+ // When the value of ExpirationModel is KEY_MATERIAL_EXPIRES, you must specify
+ // a value for the ValidTo parameter. When value is KEY_MATERIAL_DOES_NOT_EXPIRE,
+ // you must omit the ValidTo parameter.
+ //
+ // You cannot change the ExpirationModel or ValidTo values for the current import
+ // after the request completes. To change either value, you must reimport the
+ // key material.
ExpirationModel *string `type:"string" enum:"ExpirationModelType"`
// The import token that you received in the response to a previous GetParametersForImport
@@ -13494,12 +16310,16 @@ type ImportKeyMaterialInput struct {
// ImportToken is a required field
ImportToken []byte `min:"1" type:"blob" required:"true"`
- // The identifier of the symmetric encryption KMS key that receives the imported
- // key material. This must be the same KMS key specified in the KeyID parameter
+ // The identifier of the KMS key that will be associated with the imported key
+ // material. This must be the same KMS key specified in the KeyID parameter
// of the corresponding GetParametersForImport request. The Origin of the KMS
- // key must be EXTERNAL. You cannot perform this operation on an asymmetric
- // KMS key, an HMAC KMS key, a KMS key in a custom key store, or on a KMS key
- // in a different Amazon Web Services account
+ // key must be EXTERNAL and its KeyState must be PendingImport.
+ //
+ // The KMS key can be a symmetric encryption KMS key, HMAC KMS key, asymmetric
+ // encryption KMS key, or asymmetric signing KMS key, including a multi-Region
+ // key (kms/latest/developerguide/multi-region-keys-overview.html) of any supported
+ // type. You cannot perform this operation on a KMS key in a custom key store,
+ // or on a KMS key in a different Amazon Web Services account.
//
// Specify the key ID or key ARN of the KMS key.
//
@@ -13514,10 +16334,20 @@ type ImportKeyMaterialInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // The time at which the imported key material expires. When the key material
- // expires, KMS deletes the key material and the KMS key becomes unusable. You
- // must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE.
- // Otherwise it is required.
+ // The date and time when the imported key material expires. This parameter
+ // is required when the value of the ExpirationModel parameter is KEY_MATERIAL_EXPIRES.
+ // Otherwise it is not valid.
+ //
+ // The value of this parameter must be a future date and time. The maximum value
+ // is 365 days from the request date.
+ //
+ // When the key material expires, KMS deletes the key material from the KMS
+ // key. Without its key material, the KMS key is unusable. To use the KMS key
+ // in cryptographic operations, you must reimport the same key material.
+ //
+ // You cannot change the ExpirationModel or ValidTo values for the current import
+ // after the request completes. To change either value, you must delete (DeleteImportedKeyMaterial)
+ // and reimport the key material.
ValidTo *time.Time `type:"timestamp"`
}
@@ -13752,9 +16582,10 @@ func (s *IncorrectKeyMaterialException) RequestID() string {
}
// The request was rejected because the trust anchor certificate in the request
-// is not the trust anchor certificate for the specified CloudHSM cluster.
+// to create an CloudHSM key store is not the trust anchor certificate for the
+// specified CloudHSM cluster.
//
-// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+// When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
type IncorrectTrustAnchorException struct {
@@ -14287,7 +17118,8 @@ func (s *InvalidImportTokenException) RequestID() string {
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+// agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -14423,9 +17255,17 @@ func (s *InvalidMarkerException) RequestID() string {
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
+// This exceptions means one of the following:
+//
+// - The key state of the KMS key is not compatible with the operation. To
+// find the key state, use the DescribeKey operation. For more information
+// about which key states are compatible with each KMS operation, see Key
+// states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+// in the Key Management Service Developer Guide .
+//
+// - For cryptographic operations on KMS keys in custom key stores, this
+// exception represents a general failure with many possible causes. To identify
+// the cause, see the error message that accompanies the exception.
type InvalidStateException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -14664,8 +17504,8 @@ func (s *KeyListEntry) SetKeyId(v string) *KeyListEntry {
// Contains metadata about a KMS key.
//
-// This data type is used as a response element for the CreateKey and DescribeKey
-// operations.
+// This data type is used as a response element for the CreateKey, DescribeKey,
+// and ReplicateKey operations.
type KeyMetadata struct {
_ struct{} `type:"structure"`
@@ -14679,16 +17519,17 @@ type KeyMetadata struct {
Arn *string `min:"20" type:"string"`
// The cluster ID of the CloudHSM cluster that contains the key material for
- // the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
+ // the KMS key. When you create a KMS key in an CloudHSM custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
// KMS creates the key material for the KMS key in the associated CloudHSM cluster.
- // This value is present only when the KMS key is created in a custom key store.
+ // This field is present only when the KMS key is created in an CloudHSM key
+ // store.
CloudHsmClusterId *string `min:"19" type:"string"`
// The date and time when the KMS key was created.
CreationDate *time.Time `type:"timestamp"`
// A unique identifier for the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
- // that contains the KMS key. This value is present only when the KMS key is
+ // that contains the KMS key. This field is present only when the KMS key is
// created in a custom key store.
CustomKeyStoreId *string `min:"1" type:"string"`
@@ -14696,7 +17537,7 @@ type KeyMetadata struct {
//
// The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend
// that you use the KeySpec field in your code. However, to avoid breaking changes,
- // KMS will support both fields.
+ // KMS supports both fields.
//
// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
@@ -14727,6 +17568,9 @@ type KeyMetadata struct {
// only when Origin is EXTERNAL, otherwise this value is omitted.
ExpirationModel *string `type:"string" enum:"ExpirationModelType"`
+ // The key agreement algorithm used to derive a shared secret.
+ KeyAgreementAlgorithms []*string `type:"list" enum:"KeyAgreementAlgorithmSpec"`
+
// The globally unique identifier for the KMS key.
//
// KeyId is a required field
@@ -14814,6 +17658,13 @@ type KeyMetadata struct {
// value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel
// is KEY_MATERIAL_EXPIRES, otherwise this value is omitted.
ValidTo *time.Time `type:"timestamp"`
+
+ // Information about the external key that is associated with a KMS key in an
+ // external key store.
+ //
+ // For more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+ // in the Key Management Service Developer Guide.
+ XksKeyConfiguration *XksKeyConfigurationType `type:"structure"`
}
// String returns the string representation.
@@ -14900,6 +17751,12 @@ func (s *KeyMetadata) SetExpirationModel(v string) *KeyMetadata {
return s
}
+// SetKeyAgreementAlgorithms sets the KeyAgreementAlgorithms field's value.
+func (s *KeyMetadata) SetKeyAgreementAlgorithms(v []*string) *KeyMetadata {
+ s.KeyAgreementAlgorithms = v
+ return s
+}
+
// SetKeyId sets the KeyId field's value.
func (s *KeyMetadata) SetKeyId(v string) *KeyMetadata {
s.KeyId = &v
@@ -14972,6 +17829,12 @@ func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata {
return s
}
+// SetXksKeyConfiguration sets the XksKeyConfiguration field's value.
+func (s *KeyMetadata) SetXksKeyConfiguration(v *XksKeyConfigurationType) *KeyMetadata {
+ s.XksKeyConfiguration = v
+ return s
+}
+
// The request was rejected because the specified KMS key was not available.
// You can retry the request.
type KeyUnavailableException struct {
@@ -15204,7 +18067,7 @@ type ListAliasesOutput struct {
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -15374,7 +18237,7 @@ type ListGrantsResponse struct {
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -15519,7 +18382,7 @@ type ListKeyPoliciesOutput struct {
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -15560,6 +18423,149 @@ func (s *ListKeyPoliciesOutput) SetTruncated(v bool) *ListKeyPoliciesOutput {
return s
}
+type ListKeyRotationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Gets the key rotations for the specified KMS key.
+ //
+ // Specify the key ID or key ARN of the KMS key.
+ //
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+ //
+ // KeyId is a required field
+ KeyId *string `min:"1" type:"string" required:"true"`
+
+ // Use this parameter to specify the maximum number of items to return. When
+ // this value is present, KMS does not return more than the specified number
+ // of items, but it might return fewer.
+ //
+ // This value is optional. If you include a value, it must be between 1 and
+ // 1000, inclusive. If you do not include a value, it defaults to 100.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Use this parameter in a subsequent request after you receive a response with
+ // truncated results. Set it to the value of NextMarker from the truncated response
+ // you just received.
+ Marker *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListKeyRotationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListKeyRotationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListKeyRotationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListKeyRotationsInput"}
+ if s.KeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyId"))
+ }
+ if s.KeyId != nil && len(*s.KeyId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *ListKeyRotationsInput) SetKeyId(v string) *ListKeyRotationsInput {
+ s.KeyId = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListKeyRotationsInput) SetLimit(v int64) *ListKeyRotationsInput {
+ s.Limit = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListKeyRotationsInput) SetMarker(v string) *ListKeyRotationsInput {
+ s.Marker = &v
+ return s
+}
+
+type ListKeyRotationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // When Truncated is true, this element is present and contains the value to
+ // use for the Marker parameter in a subsequent request.
+ NextMarker *string `min:"1" type:"string"`
+
+ // A list of completed key material rotations.
+ Rotations []*RotationsListEntry `type:"list"`
+
+ // A flag that indicates whether there are more items in the list. When this
+ // value is true, the list in this response is truncated. To get more items,
+ // pass the value of the NextMarker element in this response to the Marker parameter
+ // in a subsequent request.
+ Truncated *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListKeyRotationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListKeyRotationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListKeyRotationsOutput) SetNextMarker(v string) *ListKeyRotationsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetRotations sets the Rotations field's value.
+func (s *ListKeyRotationsOutput) SetRotations(v []*RotationsListEntry) *ListKeyRotationsOutput {
+ s.Rotations = v
+ return s
+}
+
+// SetTruncated sets the Truncated field's value.
+func (s *ListKeyRotationsOutput) SetTruncated(v bool) *ListKeyRotationsOutput {
+ s.Truncated = &v
+ return s
+}
+
type ListKeysInput struct {
_ struct{} `type:"structure"`
@@ -15635,7 +18641,7 @@ type ListKeysOutput struct {
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -15781,13 +18787,13 @@ type ListResourceTagsOutput struct {
// A list of tags. Each tag consists of a tag key and a tag value.
//
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
Tags []*Tag `type:"list"`
// A flag that indicates whether there are more items in the list. When this
// value is true, the list in this response is truncated. To get more items,
- // pass the value of the NextMarker element in thisresponse to the Marker parameter
+ // pass the value of the NextMarker element in this response to the Marker parameter
// in a subsequent request.
Truncated *bool `type:"boolean"`
}
@@ -15848,11 +18854,10 @@ type ListRetirableGrantsInput struct {
// Amazon Web Services account.
//
// To specify the retiring principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // of an Amazon Web Services principal. Valid Amazon Web Services principals
- // include Amazon Web Services accounts (root), IAM users, federated users,
- // and assumed role users. For examples of the ARN syntax for specifying a principal,
- // see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam)
- // in the Example ARNs section of the Amazon Web Services General Reference.
+ // of an Amazon Web Services principal. Valid principals include Amazon Web
+ // Services accounts, IAM users, IAM roles, federated users, and assumed role
+ // users. For help with the ARN syntax for a principal, see IAM ARNs (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns)
+ // in the Identity and Access Management User Guide .
//
// RetiringPrincipal is a required field
RetiringPrincipal *string `min:"1" type:"string" required:"true"`
@@ -16146,19 +19151,18 @@ func (s *NotFoundException) RequestID() string {
type PutKeyPolicyInput struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
//
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
// Use this parameter only when you intend to prevent the principal that is
- // making the request from making a subsequent PutKeyPolicy request on the KMS
- // key.
- //
- // The default value is false.
+ // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
// Sets the key policy on the specified KMS key.
@@ -16180,20 +19184,19 @@ type PutKeyPolicyInput struct {
//
// The key policy must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy
- // must allow the principal that is making the PutKeyPolicy request to make
- // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk
- // that the KMS key becomes unmanageable. For more information, refer to
- // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide.
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
// in the Amazon Web Services Identity and Access Management User Guide.
//
// A key policy document can include only the following characters:
@@ -16208,17 +19211,16 @@ type PutKeyPolicyInput struct {
// characters
//
// For information about key policies, see Key policies in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html)
- // in the Key Management Service Developer Guide. For help writing and formatting
+ // in the Key Management Service Developer Guide.For help writing and formatting
// a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
// in the Identity and Access Management User Guide .
//
// Policy is a required field
Policy *string `min:"1" type:"string" required:"true"`
- // The name of the key policy. The only valid value is default.
- //
- // PolicyName is a required field
- PolicyName *string `min:"1" type:"string" required:"true"`
+ // The name of the key policy. If no policy name is specified, the default value
+ // is default. The only valid value is default.
+ PolicyName *string `min:"1" type:"string"`
}
// String returns the string representation.
@@ -16254,9 +19256,6 @@ func (s *PutKeyPolicyInput) Validate() error {
if s.Policy != nil && len(*s.Policy) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
}
- if s.PolicyName == nil {
- invalidParams.Add(request.NewErrParamRequired("PolicyName"))
- }
if s.PolicyName != nil && len(*s.PolicyName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
}
@@ -16332,6 +19331,9 @@ type ReEncryptInput struct {
// Specifies that encryption context to use when the reencrypting the data.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// A destination encryption context is valid only when the destination KMS key
// is a symmetric encryption KMS key. The standard ciphertext format for asymmetric
// KMS keys does not include fields for metadata.
@@ -16374,6 +19376,13 @@ type ReEncryptInput struct {
// DestinationKeyId is a required field
DestinationKeyId *string `min:"1" type:"string" required:"true"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -16508,6 +19517,12 @@ func (s *ReEncryptInput) SetDestinationKeyId(v string) *ReEncryptInput {
return s
}
+// SetDryRun sets the DryRun field's value.
+func (s *ReEncryptInput) SetDryRun(v bool) *ReEncryptInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *ReEncryptInput) SetGrantTokens(v []*string) *ReEncryptInput {
s.GrantTokens = v
@@ -16603,26 +19618,93 @@ func (s *ReEncryptOutput) SetSourceKeyId(v string) *ReEncryptOutput {
return s
}
-type ReplicateKeyInput struct {
+// Contains information about the party that receives the response from the
+// API operation.
+//
+// This data type is designed to support Amazon Web Services Nitro Enclaves,
+// which lets you create an isolated compute environment in Amazon EC2. For
+// information about the interaction between KMS and Amazon Web Services Nitro
+// Enclaves, see How Amazon Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// in the Key Management Service Developer Guide.
+type RecipientInfo struct {
_ struct{} `type:"structure"`
- // A flag to indicate whether to bypass the key policy lockout safety check.
- //
+ // The attestation document for an Amazon Web Services Nitro Enclave. This document
+ // includes the enclave's public key.
+ // AttestationDocument is automatically base64 encoded/decoded by the SDK.
+ AttestationDocument []byte `min:"1" type:"blob"`
+
+ // The encryption algorithm that KMS should use with the public key for an Amazon
+ // Web Services Nitro Enclave to encrypt plaintext values for the response.
+ // The only valid value is RSAES_OAEP_SHA_256.
+ KeyEncryptionAlgorithm *string `type:"string" enum:"KeyEncryptionMechanism"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecipientInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecipientInfo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RecipientInfo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RecipientInfo"}
+ if s.AttestationDocument != nil && len(s.AttestationDocument) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttestationDocument", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttestationDocument sets the AttestationDocument field's value.
+func (s *RecipientInfo) SetAttestationDocument(v []byte) *RecipientInfo {
+ s.AttestationDocument = v
+ return s
+}
+
+// SetKeyEncryptionAlgorithm sets the KeyEncryptionAlgorithm field's value.
+func (s *RecipientInfo) SetKeyEncryptionAlgorithm(v string) *RecipientInfo {
+ s.KeyEncryptionAlgorithm = &v
+ return s
+}
+
+type ReplicateKeyInput struct {
+ _ struct{} `type:"structure"`
+
+ // Skips ("bypasses") the key policy lockout safety check. The default value
+ // is false.
+ //
// Setting this value to true increases the risk that the KMS key becomes unmanageable.
// Do not set this value to true indiscriminately.
//
- // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section in the Key Management Service Developer Guide.
+ // For more information, see Default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide.
//
// Use this parameter only when you intend to prevent the principal that is
- // making the request from making a subsequent PutKeyPolicy request on the KMS
- // key.
- //
- // The default value is false.
+ // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html)
+ // request on the KMS key.
BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
// A description of the KMS key. The default value is an empty string (no description).
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// The description is not a shared property of multi-Region keys. You can specify
// the same description or a different description for each key in a set of
// related multi-Region keys. KMS does not synchronize this property.
@@ -16655,20 +19737,20 @@ type ReplicateKeyInput struct {
//
// If you provide a key policy, it must meet the following criteria:
//
- // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy
- // must give the caller kms:PutKeyPolicy permission on the replica key. This
- // reduces the risk that the KMS key becomes unmanageable. For more information,
- // refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
- // section of the Key Management Service Developer Guide .
+ // * The key policy must allow the calling principal to make a subsequent
+ // PutKeyPolicy request on the KMS key. This reduces the risk that the KMS
+ // key becomes unmanageable. For more information, see Default key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-default.html#prevent-unmanageable-key)
+ // in the Key Management Service Developer Guide. (To omit this condition,
+ // set BypassPolicyLockoutSafetyCheck to true.)
//
// * Each statement in the key policy must contain one or more principals.
// The principals in the key policy must exist and be visible to KMS. When
- // you create a new Amazon Web Services principal (for example, an IAM user
- // or role), you might need to enforce a delay before including the new principal
- // in a key policy because the new principal might not be immediately visible
- // to KMS. For more information, see Changes that I make are not always immediately
- // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
- // in the Identity and Access Management User Guide .
+ // you create a new Amazon Web Services principal, you might need to enforce
+ // a delay before including the new principal in a key policy because the
+ // new principal might not be immediately visible to KMS. For more information,
+ // see Changes that I make are not always immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
+ // in the Amazon Web Services Identity and Access Management User Guide.
//
// A key policy document can include only the following characters:
//
@@ -16719,8 +19801,11 @@ type ReplicateKeyInput struct {
// KMS key when it is created. To tag an existing KMS key, use the TagResource
// operation.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
- // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
+ // For details, see ABAC for KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
// in the Key Management Service Developer Guide.
//
// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
@@ -16890,6 +19975,13 @@ func (s *ReplicateKeyOutput) SetReplicaTags(v []*Tag) *ReplicateKeyOutput {
type RetireGrantInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants,
// or ListRetirableGrants.
//
@@ -16949,6 +20041,12 @@ func (s *RetireGrantInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *RetireGrantInput) SetDryRun(v bool) *RetireGrantInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantId sets the GrantId field's value.
func (s *RetireGrantInput) SetGrantId(v string) *RetireGrantInput {
s.GrantId = &v
@@ -16992,6 +20090,13 @@ func (s RetireGrantOutput) GoString() string {
type RevokeGrantInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// Identifies the grant to revoke. To get the grant ID, use CreateGrant, ListGrants,
// or ListRetirableGrants.
//
@@ -17056,6 +20161,12 @@ func (s *RevokeGrantInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *RevokeGrantInput) SetDryRun(v bool) *RevokeGrantInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantId sets the GrantId field's value.
func (s *RevokeGrantInput) SetGrantId(v string) *RevokeGrantInput {
s.GrantId = &v
@@ -17090,6 +20201,156 @@ func (s RevokeGrantOutput) GoString() string {
return s.String()
}
+type RotateKeyOnDemandInput struct {
+ _ struct{} `type:"structure"`
+
+ // Identifies a symmetric encryption KMS key. You cannot perform on-demand rotation
+ // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html),
+ // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html),
+ // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html),
+ // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html).
+ // To perform on-demand rotation of a set of related multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate),
+ // invoke the on-demand rotation on the primary key.
+ //
+ // Specify the key ID or key ARN of the KMS key.
+ //
+ // For example:
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+ //
+ // KeyId is a required field
+ KeyId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotateKeyOnDemandInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotateKeyOnDemandInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RotateKeyOnDemandInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RotateKeyOnDemandInput"}
+ if s.KeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyId"))
+ }
+ if s.KeyId != nil && len(*s.KeyId) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *RotateKeyOnDemandInput) SetKeyId(v string) *RotateKeyOnDemandInput {
+ s.KeyId = &v
+ return s
+}
+
+type RotateKeyOnDemandOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Identifies the symmetric encryption KMS key that you initiated on-demand
+ // rotation on.
+ KeyId *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotateKeyOnDemandOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotateKeyOnDemandOutput) GoString() string {
+ return s.String()
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *RotateKeyOnDemandOutput) SetKeyId(v string) *RotateKeyOnDemandOutput {
+ s.KeyId = &v
+ return s
+}
+
+// Contains information about completed key material rotations.
+type RotationsListEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Unique identifier of the key.
+ KeyId *string `min:"1" type:"string"`
+
+ // Date and time that the key material rotation completed. Formatted as Unix
+ // time.
+ RotationDate *time.Time `type:"timestamp"`
+
+ // Identifies whether the key material rotation was a scheduled automatic rotation
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-enable-disable)
+ // or an on-demand rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotating-keys-on-demand).
+ RotationType *string `type:"string" enum:"RotationType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotationsListEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RotationsListEntry) GoString() string {
+ return s.String()
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *RotationsListEntry) SetKeyId(v string) *RotationsListEntry {
+ s.KeyId = &v
+ return s
+}
+
+// SetRotationDate sets the RotationDate field's value.
+func (s *RotationsListEntry) SetRotationDate(v time.Time) *RotationsListEntry {
+ s.RotationDate = &v
+ return s
+}
+
+// SetRotationType sets the RotationType field's value.
+func (s *RotationsListEntry) SetRotationType(v string) *RotationsListEntry {
+ s.RotationType = &v
+ return s
+}
+
type ScheduleKeyDeletionInput struct {
_ struct{} `type:"structure"`
@@ -17116,7 +20377,10 @@ type ScheduleKeyDeletionInput struct {
// waiting period begins immediately.
//
// This value is optional. If you include a value, it must be between 7 and
- // 30, inclusive. If you do not include a value, it defaults to 30.
+ // 30, inclusive. If you do not include a value, it defaults to 30. You can
+ // use the kms:ScheduleKeyDeletionPendingWindowInDays (https://docs.aws.amazon.com/kms/latest/developerguide/conditions-kms.html#conditions-kms-schedule-key-deletion-pending-window-in-days)
+ // condition key to further constrain the values that principals can specify
+ // in the PendingWindowInDays parameter.
PendingWindowInDays *int64 `min:"1" type:"integer"`
}
@@ -17243,6 +20507,13 @@ func (s *ScheduleKeyDeletionOutput) SetPendingWindowInDays(v int64) *ScheduleKey
type SignInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -17278,10 +20549,10 @@ type SignInput struct {
KeyId *string `min:"1" type:"string" required:"true"`
// Specifies the message or message digest to sign. Messages can be 0-4096 bytes.
- // To sign a larger message, provide the message digest.
+ // To sign a larger message, provide a message digest.
//
- // If you provide a message, KMS generates a hash digest of the message and
- // then signs it.
+ // If you provide a message digest, use the DIGEST value of MessageType to prevent
+ // the digest from being hashed again while signing.
//
// Message is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by SignInput's
@@ -17292,15 +20563,44 @@ type SignInput struct {
// Message is a required field
Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"`
- // Tells KMS whether the value of the Message parameter is a message or message
- // digest. The default value, RAW, indicates a message. To indicate a message
- // digest, enter DIGEST.
+ // Tells KMS whether the value of the Message parameter should be hashed as
+ // part of the signing algorithm. Use RAW for unhashed messages; use DIGEST
+ // for message digests, which are already hashed.
+ //
+ // When the value of MessageType is RAW, KMS uses the standard signing algorithm,
+ // which begins with a hash function. When the value is DIGEST, KMS skips the
+ // hashing step in the signing algorithm.
+ //
+ // Use the DIGEST value only when the value of the Message parameter is a message
+ // digest. If you use the DIGEST value with an unhashed message, the security
+ // of the signing operation can be compromised.
+ //
+ // When the value of MessageTypeis DIGEST, the length of the Message value must
+ // match the length of hashed messages for the specified signing algorithm.
+ //
+ // You can submit a message digest and omit the MessageType or specify RAW so
+ // the digest is hashed again while signing. However, this can cause verification
+ // failures when verifying with a system that assumes a single hash.
+ //
+ // The hashing algorithm in that Sign uses is based on the SigningAlgorithm
+ // value.
+ //
+ // * Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.
+ //
+ // * SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification
+ // with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
MessageType *string `type:"string" enum:"MessageType"`
// Specifies the signing algorithm to use when signing the message.
//
// Choose an algorithm that is compatible with the type and size of the specified
- // asymmetric KMS key.
+ // asymmetric KMS key. When signing with RSA key pairs, RSASSA-PSS algorithms
+ // are preferred. We include RSASSA-PKCS1-v1_5 algorithms for compatibility
+ // with existing applications.
//
// SigningAlgorithm is a required field
SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"`
@@ -17349,6 +20649,12 @@ func (s *SignInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *SignInput) SetDryRun(v bool) *SignInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *SignInput) SetGrantTokens(v []*string) *SignInput {
s.GrantTokens = v
@@ -17392,7 +20698,7 @@ type SignOutput struct {
// this value is defined by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017).
//
// * When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing
- // algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005
+ // algorithms, this value is a DER-encoded object as defined by ANSI X9.62–2005
// and RFC 3279 Section 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3).
// This is the most commonly used signature format and is appropriate for
// most uses.
@@ -17445,6 +20751,9 @@ func (s *SignOutput) SetSigningAlgorithm(v string) *SignOutput {
// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and
// tag values are both required, but tag values can be empty (null) strings.
//
+// Do not include confidential or sensitive information in this field. This
+// field may be displayed in plaintext in CloudTrail logs and other output.
+//
// For information about the rules that apply to tag keys and tag values, see
// User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
// in the Amazon Web Services Billing and Cost Management User Guide.
@@ -17593,10 +20902,11 @@ type TagResourceInput struct {
// KeyId is a required field
KeyId *string `min:"1" type:"string" required:"true"`
- // One or more tags.
+ // One or more tags. Each tag consists of a tag key and a tag value. The tag
+ // value can be an empty (null) string.
//
- // Each tag consists of a tag key and a tag value. The tag value can be an empty
- // (null) string.
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
//
// You cannot have more than one tag on a KMS key with the same tag key. If
// you specify an existing tag key with a different tag value, KMS replaces
@@ -17854,6 +21164,9 @@ type UpdateAliasInput struct {
// with alias/ followed by the alias name, such as alias/ExampleAlias. You cannot
// use UpdateAlias to change the alias name.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// AliasName is a required field
AliasName *string `min:"1" type:"string" required:"true"`
@@ -17863,8 +21176,8 @@ type UpdateAliasInput struct {
//
// The KMS key must be in the same Amazon Web Services account and Region as
// the alias. Also, the new target KMS key must be the same type as the current
- // target KMS key (both symmetric or both asymmetric) and they must have the
- // same key usage.
+ // target KMS key (both symmetric or both asymmetric or both HMAC) and they
+ // must have the same key usage.
//
// Specify the key ID or key ARN of the KMS key.
//
@@ -17959,7 +21272,8 @@ func (s UpdateAliasOutput) GoString() string {
type UpdateCustomKeyStoreInput struct {
_ struct{} `type:"structure"`
- // Associates the custom key store with a related CloudHSM cluster.
+ // Associates the custom key store with a related CloudHSM cluster. This parameter
+ // is valid only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
//
// Enter the cluster ID of the cluster that you used to create the custom key
// store or a cluster that shares a backup history and has the same cluster
@@ -17969,6 +21283,8 @@ type UpdateCustomKeyStoreInput struct {
// for a cluster associated with a custom key store. To view the cluster certificate
// of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
+ //
+ // To change this value, the CloudHSM key store must be disconnected.
CloudHsmClusterId *string `min:"19" type:"string"`
// Identifies the custom key store that you want to update. Enter the ID of
@@ -17979,12 +21295,15 @@ type UpdateCustomKeyStoreInput struct {
CustomKeyStoreId *string `min:"1" type:"string" required:"true"`
// Enter the current password of the kmsuser crypto user (CU) in the CloudHSM
- // cluster that is associated with the custom key store.
+ // cluster that is associated with the custom key store. This parameter is valid
+ // only for custom key stores with a CustomKeyStoreType of AWS_CLOUDHSM.
//
// This parameter tells KMS the current password of the kmsuser crypto user
// (CU). It does not set or change the password of any users in the CloudHSM
// cluster.
//
+ // To change this value, the CloudHSM key store must be disconnected.
+ //
// KeyStorePassword is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UpdateCustomKeyStoreInput's
// String and GoString methods.
@@ -17992,7 +21311,85 @@ type UpdateCustomKeyStoreInput struct {
// Changes the friendly name of the custom key store to the value that you specify.
// The custom key store name must be unique in the Amazon Web Services account.
+ //
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
+ // To change this value, an CloudHSM key store must be disconnected. An external
+ // key store can be connected or disconnected.
NewCustomKeyStoreName *string `min:"1" type:"string"`
+
+ // Changes the credentials that KMS uses to sign requests to the external key
+ // store proxy (XKS proxy). This parameter is valid only for custom key stores
+ // with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // You must specify both the AccessKeyId and SecretAccessKey value in the authentication
+ // credential, even if you are only updating one value.
+ //
+ // This parameter doesn't establish or change your authentication credentials
+ // on the proxy. It just tells KMS the credential that you established with
+ // your external key store proxy. For example, if you rotate the credential
+ // on your external key store proxy, you can use this parameter to update the
+ // credential in KMS.
+ //
+ // You can change this value when the external key store is connected or disconnected.
+ XksProxyAuthenticationCredential *XksProxyAuthenticationCredentialType `type:"structure"`
+
+ // Changes the connectivity setting for the external key store. To indicate
+ // that the external key store proxy uses a Amazon VPC endpoint service to communicate
+ // with KMS, specify VPC_ENDPOINT_SERVICE. Otherwise, specify PUBLIC_ENDPOINT.
+ //
+ // If you change the XksProxyConnectivity to VPC_ENDPOINT_SERVICE, you must
+ // also change the XksProxyUriEndpoint and add an XksProxyVpcEndpointServiceName
+ // value.
+ //
+ // If you change the XksProxyConnectivity to PUBLIC_ENDPOINT, you must also
+ // change the XksProxyUriEndpoint and specify a null or empty string for the
+ // XksProxyVpcEndpointServiceName value.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyConnectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // Changes the URI endpoint that KMS uses to connect to your external key store
+ // proxy (XKS proxy). This parameter is valid only for custom key stores with
+ // a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // For external key stores with an XksProxyConnectivity value of PUBLIC_ENDPOINT,
+ // the protocol must be HTTPS.
+ //
+ // For external key stores with an XksProxyConnectivity value of VPC_ENDPOINT_SERVICE,
+ // specify https:// followed by the private DNS name associated with the VPC
+ // endpoint service. Each external key store must use a different private DNS
+ // name.
+ //
+ // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique
+ // in the Amazon Web Services account and Region.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyUriEndpoint *string `min:"10" type:"string"`
+
+ // Changes the base path to the proxy APIs for this external key store. To find
+ // this value, see the documentation for your external key manager and external
+ // key store proxy (XKS proxy). This parameter is valid only for custom key
+ // stores with a CustomKeyStoreType of EXTERNAL_KEY_STORE.
+ //
+ // The value must start with / and must end with /kms/xks/v1, where v1 represents
+ // the version of the KMS external key store proxy API. You can include an optional
+ // prefix between the required elements such as /example/kms/xks/v1.
+ //
+ // The combined XksProxyUriEndpoint and XksProxyUriPath values must be unique
+ // in the Amazon Web Services account and Region.
+ //
+ // You can change this value when the external key store is connected or disconnected.
+ XksProxyUriPath *string `min:"10" type:"string"`
+
+ // Changes the name that KMS uses to identify the Amazon VPC endpoint service
+ // for your external key store proxy (XKS proxy). This parameter is valid when
+ // the CustomKeyStoreType is EXTERNAL_KEY_STORE and the XksProxyConnectivity
+ // is VPC_ENDPOINT_SERVICE.
+ //
+ // To change this value, the external key store must be disconnected.
+ XksProxyVpcEndpointServiceName *string `min:"20" type:"string"`
}
// String returns the string representation.
@@ -18031,6 +21428,20 @@ func (s *UpdateCustomKeyStoreInput) Validate() error {
if s.NewCustomKeyStoreName != nil && len(*s.NewCustomKeyStoreName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NewCustomKeyStoreName", 1))
}
+ if s.XksProxyUriEndpoint != nil && len(*s.XksProxyUriEndpoint) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriEndpoint", 10))
+ }
+ if s.XksProxyUriPath != nil && len(*s.XksProxyUriPath) < 10 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyUriPath", 10))
+ }
+ if s.XksProxyVpcEndpointServiceName != nil && len(*s.XksProxyVpcEndpointServiceName) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("XksProxyVpcEndpointServiceName", 20))
+ }
+ if s.XksProxyAuthenticationCredential != nil {
+ if err := s.XksProxyAuthenticationCredential.Validate(); err != nil {
+ invalidParams.AddNested("XksProxyAuthenticationCredential", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -18062,6 +21473,36 @@ func (s *UpdateCustomKeyStoreInput) SetNewCustomKeyStoreName(v string) *UpdateCu
return s
}
+// SetXksProxyAuthenticationCredential sets the XksProxyAuthenticationCredential field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyAuthenticationCredential(v *XksProxyAuthenticationCredentialType) *UpdateCustomKeyStoreInput {
+ s.XksProxyAuthenticationCredential = v
+ return s
+}
+
+// SetXksProxyConnectivity sets the XksProxyConnectivity field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyConnectivity(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyConnectivity = &v
+ return s
+}
+
+// SetXksProxyUriEndpoint sets the XksProxyUriEndpoint field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyUriEndpoint(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyUriEndpoint = &v
+ return s
+}
+
+// SetXksProxyUriPath sets the XksProxyUriPath field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyUriPath(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyUriPath = &v
+ return s
+}
+
+// SetXksProxyVpcEndpointServiceName sets the XksProxyVpcEndpointServiceName field's value.
+func (s *UpdateCustomKeyStoreInput) SetXksProxyVpcEndpointServiceName(v string) *UpdateCustomKeyStoreInput {
+ s.XksProxyVpcEndpointServiceName = &v
+ return s
+}
+
type UpdateCustomKeyStoreOutput struct {
_ struct{} `type:"structure"`
}
@@ -18089,6 +21530,9 @@ type UpdateKeyDescriptionInput struct {
// New description for the KMS key.
//
+ // Do not include confidential or sensitive information in this field. This
+ // field may be displayed in plaintext in CloudTrail logs and other output.
+ //
// Description is a required field
Description *string `type:"string" required:"true"`
@@ -18286,6 +21730,13 @@ func (s UpdatePrimaryRegionOutput) GoString() string {
type VerifyInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -18337,13 +21788,37 @@ type VerifyInput struct {
// Message is a required field
Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"`
- // Tells KMS whether the value of the Message parameter is a message or message
- // digest. The default value, RAW, indicates a message. To indicate a message
- // digest, enter DIGEST.
+ // Tells KMS whether the value of the Message parameter should be hashed as
+ // part of the signing algorithm. Use RAW for unhashed messages; use DIGEST
+ // for message digests, which are already hashed.
+ //
+ // When the value of MessageType is RAW, KMS uses the standard signing algorithm,
+ // which begins with a hash function. When the value is DIGEST, KMS skips the
+ // hashing step in the signing algorithm.
//
// Use the DIGEST value only when the value of the Message parameter is a message
- // digest. If you use the DIGEST value with a raw message, the security of the
- // verification operation can be compromised.
+ // digest. If you use the DIGEST value with an unhashed message, the security
+ // of the verification operation can be compromised.
+ //
+ // When the value of MessageTypeis DIGEST, the length of the Message value must
+ // match the length of hashed messages for the specified signing algorithm.
+ //
+ // You can submit a message digest and omit the MessageType or specify RAW so
+ // the digest is hashed again while signing. However, if the signed message
+ // is hashed once while signing, but twice while verifying, verification fails,
+ // even when the message hasn't changed.
+ //
+ // The hashing algorithm in that Verify uses is based on the SigningAlgorithm
+ // value.
+ //
+ // * Signing algorithms that end in SHA_256 use the SHA_256 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_384 use the SHA_384 hashing algorithm.
+ //
+ // * Signing algorithms that end in SHA_512 use the SHA_512 hashing algorithm.
+ //
+ // * SM2DSA uses the SM3 hashing algorithm. For details, see Offline verification
+ // with SM2 key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
MessageType *string `type:"string" enum:"MessageType"`
// The signature that the Sign operation generated.
@@ -18408,6 +21883,12 @@ func (s *VerifyInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *VerifyInput) SetDryRun(v bool) *VerifyInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *VerifyInput) SetGrantTokens(v []*string) *VerifyInput {
s.GrantTokens = v
@@ -18447,6 +21928,13 @@ func (s *VerifyInput) SetSigningAlgorithm(v string) *VerifyInput {
type VerifyMacInput struct {
_ struct{} `type:"structure"`
+ // Checks if your request will succeed. DryRun is an optional parameter.
+ //
+ // To learn more about how to use this parameter, see Testing your KMS API calls
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/programming-dryrun.html)
+ // in the Key Management Service Developer Guide.
+ DryRun *bool `type:"boolean"`
+
// A list of grant tokens.
//
// Use a grant token when your permission to call this operation comes from
@@ -18545,6 +22033,12 @@ func (s *VerifyMacInput) Validate() error {
return nil
}
+// SetDryRun sets the DryRun field's value.
+func (s *VerifyMacInput) SetDryRun(v bool) *VerifyMacInput {
+ s.DryRun = &v
+ return s
+}
+
// SetGrantTokens sets the GrantTokens field's value.
func (s *VerifyMacInput) SetGrantTokens(v []*string) *VerifyMacInput {
s.GrantTokens = v
@@ -18683,7 +22177,1029 @@ func (s *VerifyOutput) SetSigningAlgorithm(v string) *VerifyOutput {
return s
}
-const (
+// The request was rejected because the (XksKeyId) is already associated with
+// another KMS key in this external key store. Each KMS key in an external key
+// store must be associated with a different external key.
+type XksKeyAlreadyInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyAlreadyInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyAlreadyInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksKeyAlreadyInUseException(v protocol.ResponseMetadata) error {
+ return &XksKeyAlreadyInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksKeyAlreadyInUseException) Code() string {
+ return "XksKeyAlreadyInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksKeyAlreadyInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyAlreadyInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksKeyAlreadyInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyAlreadyInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyAlreadyInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Information about the external key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)that
+// is associated with a KMS key in an external key store.
+//
+// This element appears in a CreateKey or DescribeKey response only for a KMS
+// key in an external key store.
+//
+// The external key is a symmetric encryption key that is hosted by an external
+// key manager outside of Amazon Web Services. When you use the KMS key in an
+// external key store in a cryptographic operation, the cryptographic operation
+// is performed in the external key manager using the specified external key.
+// For more information, see External key (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html#concept-external-key)
+// in the Key Management Service Developer Guide.
+type XksKeyConfigurationType struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the external key in its external key manager. This is the ID that
+ // the external key store proxy uses to identify the external key.
+ Id *string `min:"1" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyConfigurationType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyConfigurationType) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *XksKeyConfigurationType) SetId(v string) *XksKeyConfigurationType {
+ s.Id = &v
+ return s
+}
+
+// The request was rejected because the external key specified by the XksKeyId
+// parameter did not meet the configuration requirements for an external key
+// store.
+//
+// The external key must be an AES-256 symmetric key that is enabled and performs
+// encryption and decryption.
+type XksKeyInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksKeyInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksKeyInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksKeyInvalidConfigurationException) Code() string {
+ return "XksKeyInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksKeyInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksKeyInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the external key store proxy could not find
+// the external key. This exception is thrown when the value of the XksKeyId
+// parameter doesn't identify a key in the external key manager associated with
+// the external key proxy.
+//
+// Verify that the XksKeyId represents an existing key in the external key manager.
+// Use the key identifier that the external key store proxy uses to identify
+// the key. For details, see the documentation provided with your external key
+// store proxy or key manager.
+type XksKeyNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksKeyNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksKeyNotFoundException(v protocol.ResponseMetadata) error {
+ return &XksKeyNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksKeyNotFoundException) Code() string {
+ return "XksKeyNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *XksKeyNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksKeyNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *XksKeyNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksKeyNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksKeyNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS uses the authentication credential to sign requests that it sends to
+// the external key store proxy (XKS proxy) on your behalf. You establish these
+// credentials on your external key store proxy and report them to KMS.
+//
+// The XksProxyAuthenticationCredential includes two required elements.
+type XksProxyAuthenticationCredentialType struct {
+ _ struct{} `type:"structure"`
+
+ // A unique identifier for the raw secret access key.
+ //
+ // AccessKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyAuthenticationCredentialType's
+ // String and GoString methods.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"20" type:"string" required:"true" sensitive:"true"`
+
+ // A secret string of 43-64 characters. Valid characters are a-z, A-Z, 0-9,
+ // /, +, and =.
+ //
+ // RawSecretAccessKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyAuthenticationCredentialType's
+ // String and GoString methods.
+ //
+ // RawSecretAccessKey is a required field
+ RawSecretAccessKey *string `min:"43" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyAuthenticationCredentialType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyAuthenticationCredentialType) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *XksProxyAuthenticationCredentialType) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "XksProxyAuthenticationCredentialType"}
+ if s.AccessKeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+ }
+ if s.AccessKeyId != nil && len(*s.AccessKeyId) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 20))
+ }
+ if s.RawSecretAccessKey == nil {
+ invalidParams.Add(request.NewErrParamRequired("RawSecretAccessKey"))
+ }
+ if s.RawSecretAccessKey != nil && len(*s.RawSecretAccessKey) < 43 {
+ invalidParams.Add(request.NewErrParamMinLen("RawSecretAccessKey", 43))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *XksProxyAuthenticationCredentialType) SetAccessKeyId(v string) *XksProxyAuthenticationCredentialType {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetRawSecretAccessKey sets the RawSecretAccessKey field's value.
+func (s *XksProxyAuthenticationCredentialType) SetRawSecretAccessKey(v string) *XksProxyAuthenticationCredentialType {
+ s.RawSecretAccessKey = &v
+ return s
+}
+
+// Detailed information about the external key store proxy (XKS proxy). Your
+// external key store proxy translates KMS requests into a format that your
+// external key manager can understand. These fields appear in a DescribeCustomKeyStores
+// response only when the CustomKeyStoreType is EXTERNAL_KEY_STORE.
+type XksProxyConfigurationType struct {
+ _ struct{} `type:"structure"`
+
+ // The part of the external key store proxy authentication credential (https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateCustomKeyStore.html#KMS-CreateCustomKeyStore-request-XksProxyAuthenticationCredential)
+ // that uniquely identifies the secret access key.
+ //
+ // AccessKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by XksProxyConfigurationType's
+ // String and GoString methods.
+ AccessKeyId *string `min:"20" type:"string" sensitive:"true"`
+
+ // Indicates whether the external key store proxy uses a public endpoint or
+ // an Amazon VPC endpoint service to communicate with KMS.
+ Connectivity *string `type:"string" enum:"XksProxyConnectivityType"`
+
+ // The URI endpoint for the external key store proxy.
+ //
+ // If the external key store proxy has a public endpoint, it is displayed here.
+ //
+ // If the external key store proxy uses an Amazon VPC endpoint service name,
+ // this field displays the private DNS name associated with the VPC endpoint
+ // service.
+ UriEndpoint *string `min:"10" type:"string"`
+
+ // The path to the external key store proxy APIs.
+ UriPath *string `min:"10" type:"string"`
+
+ // The Amazon VPC endpoint service used to communicate with the external key
+ // store proxy. This field appears only when the external key store proxy uses
+ // an Amazon VPC endpoint service to communicate with KMS.
+ VpcEndpointServiceName *string `min:"20" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyConfigurationType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyConfigurationType) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *XksProxyConfigurationType) SetAccessKeyId(v string) *XksProxyConfigurationType {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetConnectivity sets the Connectivity field's value.
+func (s *XksProxyConfigurationType) SetConnectivity(v string) *XksProxyConfigurationType {
+ s.Connectivity = &v
+ return s
+}
+
+// SetUriEndpoint sets the UriEndpoint field's value.
+func (s *XksProxyConfigurationType) SetUriEndpoint(v string) *XksProxyConfigurationType {
+ s.UriEndpoint = &v
+ return s
+}
+
+// SetUriPath sets the UriPath field's value.
+func (s *XksProxyConfigurationType) SetUriPath(v string) *XksProxyConfigurationType {
+ s.UriPath = &v
+ return s
+}
+
+// SetVpcEndpointServiceName sets the VpcEndpointServiceName field's value.
+func (s *XksProxyConfigurationType) SetVpcEndpointServiceName(v string) *XksProxyConfigurationType {
+ s.VpcEndpointServiceName = &v
+ return s
+}
+
+// The request was rejected because the proxy credentials failed to authenticate
+// to the specified external key store proxy. The specified external key store
+// proxy rejected a status request from KMS due to invalid credentials. This
+// can indicate an error in the credentials or in the identification of the
+// external key store proxy.
+type XksProxyIncorrectAuthenticationCredentialException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyIncorrectAuthenticationCredentialException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyIncorrectAuthenticationCredentialException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyIncorrectAuthenticationCredentialException(v protocol.ResponseMetadata) error {
+ return &XksProxyIncorrectAuthenticationCredentialException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyIncorrectAuthenticationCredentialException) Code() string {
+ return "XksProxyIncorrectAuthenticationCredentialException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyIncorrectAuthenticationCredentialException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyIncorrectAuthenticationCredentialException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyIncorrectAuthenticationCredentialException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyIncorrectAuthenticationCredentialException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyIncorrectAuthenticationCredentialException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the external key store proxy is not configured
+// correctly. To identify the cause, see the error message that accompanies
+// the exception.
+type XksProxyInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksProxyInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyInvalidConfigurationException) Code() string {
+ return "XksProxyInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS cannot interpret the response it received from the external key store
+// proxy. The problem might be a poorly constructed response, but it could also
+// be a transient network issue. If you see this error repeatedly, report it
+// to the proxy vendor.
+type XksProxyInvalidResponseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidResponseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyInvalidResponseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyInvalidResponseException(v protocol.ResponseMetadata) error {
+ return &XksProxyInvalidResponseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyInvalidResponseException) Code() string {
+ return "XksProxyInvalidResponseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyInvalidResponseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyInvalidResponseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyInvalidResponseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyInvalidResponseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyInvalidResponseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the XksProxyUriEndpoint is already associated
+// with another external key store in this Amazon Web Services Region. To identify
+// the cause, see the error message that accompanies the exception.
+type XksProxyUriEndpointInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriEndpointInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriEndpointInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriEndpointInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriEndpointInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriEndpointInUseException) Code() string {
+ return "XksProxyUriEndpointInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriEndpointInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriEndpointInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriEndpointInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriEndpointInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriEndpointInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the concatenation of the XksProxyUriEndpoint
+// and XksProxyUriPath is already associated with another external key store
+// in this Amazon Web Services Region. Each external key store in a Region must
+// use a unique external key store proxy API address.
+type XksProxyUriInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriInUseException) Code() string {
+ return "XksProxyUriInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+// before you create the external key store or update its settings.
+//
+// This exception is also thrown when the external key store proxy response
+// to a GetHealthStatus request indicates that all external key manager instances
+// are unavailable.
+type XksProxyUriUnreachableException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriUnreachableException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyUriUnreachableException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyUriUnreachableException(v protocol.ResponseMetadata) error {
+ return &XksProxyUriUnreachableException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyUriUnreachableException) Code() string {
+ return "XksProxyUriUnreachableException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyUriUnreachableException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyUriUnreachableException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyUriUnreachableException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyUriUnreachableException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyUriUnreachableException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the specified Amazon VPC endpoint service
+// is already associated with another external key store in this Amazon Web
+// Services Region. Each external key store in a Region must use a different
+// Amazon VPC endpoint service.
+type XksProxyVpcEndpointServiceInUseException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInUseException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInUseException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceInUseException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceInUseException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceInUseException) Code() string {
+ return "XksProxyVpcEndpointServiceInUseException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceInUseException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceInUseException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceInUseException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceInUseException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because the Amazon VPC endpoint service configuration
+// does not fulfill the requirements for an external key store. To identify
+// the cause, see the error message that accompanies the exception and review
+// the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+// for Amazon VPC endpoint service connectivity for an external key store.
+type XksProxyVpcEndpointServiceInvalidConfigurationException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInvalidConfigurationException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceInvalidConfigurationException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceInvalidConfigurationException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceInvalidConfigurationException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Code() string {
+ return "XksProxyVpcEndpointServiceInvalidConfigurationException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceInvalidConfigurationException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The request was rejected because KMS could not find the specified VPC endpoint
+// service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+// for the external key store. Also, confirm that the Allow principals list
+// for the VPC endpoint service includes the KMS service principal for the Region,
+// such as cks.kms.us-east-1.amazonaws.com.
+type XksProxyVpcEndpointServiceNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s XksProxyVpcEndpointServiceNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorXksProxyVpcEndpointServiceNotFoundException(v protocol.ResponseMetadata) error {
+ return &XksProxyVpcEndpointServiceNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *XksProxyVpcEndpointServiceNotFoundException) Code() string {
+ return "XksProxyVpcEndpointServiceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *XksProxyVpcEndpointServiceNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *XksProxyVpcEndpointServiceNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *XksProxyVpcEndpointServiceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *XksProxyVpcEndpointServiceNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *XksProxyVpcEndpointServiceNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+const (
// AlgorithmSpecRsaesPkcs1V15 is a AlgorithmSpec enum value
AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5"
@@ -18692,6 +23208,15 @@ const (
// AlgorithmSpecRsaesOaepSha256 is a AlgorithmSpec enum value
AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256"
+
+ // AlgorithmSpecRsaAesKeyWrapSha1 is a AlgorithmSpec enum value
+ AlgorithmSpecRsaAesKeyWrapSha1 = "RSA_AES_KEY_WRAP_SHA_1"
+
+ // AlgorithmSpecRsaAesKeyWrapSha256 is a AlgorithmSpec enum value
+ AlgorithmSpecRsaAesKeyWrapSha256 = "RSA_AES_KEY_WRAP_SHA_256"
+
+ // AlgorithmSpecSm2pke is a AlgorithmSpec enum value
+ AlgorithmSpecSm2pke = "SM2PKE"
)
// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum
@@ -18700,6 +23225,9 @@ func AlgorithmSpec_Values() []string {
AlgorithmSpecRsaesPkcs1V15,
AlgorithmSpecRsaesOaepSha1,
AlgorithmSpecRsaesOaepSha256,
+ AlgorithmSpecRsaAesKeyWrapSha1,
+ AlgorithmSpecRsaAesKeyWrapSha256,
+ AlgorithmSpecSm2pke,
}
}
@@ -18733,6 +23261,30 @@ const (
// ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet is a ConnectionErrorCodeType enum value
ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET"
+
+ // ConnectionErrorCodeTypeXksProxyAccessDenied is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyAccessDenied = "XKS_PROXY_ACCESS_DENIED"
+
+ // ConnectionErrorCodeTypeXksProxyNotReachable is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyNotReachable = "XKS_PROXY_NOT_REACHABLE"
+
+ // ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound = "XKS_VPC_ENDPOINT_SERVICE_NOT_FOUND"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidResponse is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidResponse = "XKS_PROXY_INVALID_RESPONSE"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidConfiguration = "XKS_PROXY_INVALID_CONFIGURATION"
+
+ // ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration = "XKS_VPC_ENDPOINT_SERVICE_INVALID_CONFIGURATION"
+
+ // ConnectionErrorCodeTypeXksProxyTimedOut is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyTimedOut = "XKS_PROXY_TIMED_OUT"
+
+ // ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration is a ConnectionErrorCodeType enum value
+ ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration = "XKS_PROXY_INVALID_TLS_CONFIGURATION"
)
// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum
@@ -18748,6 +23300,14 @@ func ConnectionErrorCodeType_Values() []string {
ConnectionErrorCodeTypeUserLoggedIn,
ConnectionErrorCodeTypeSubnetNotFound,
ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet,
+ ConnectionErrorCodeTypeXksProxyAccessDenied,
+ ConnectionErrorCodeTypeXksProxyNotReachable,
+ ConnectionErrorCodeTypeXksVpcEndpointServiceNotFound,
+ ConnectionErrorCodeTypeXksProxyInvalidResponse,
+ ConnectionErrorCodeTypeXksProxyInvalidConfiguration,
+ ConnectionErrorCodeTypeXksVpcEndpointServiceInvalidConfiguration,
+ ConnectionErrorCodeTypeXksProxyTimedOut,
+ ConnectionErrorCodeTypeXksProxyInvalidTlsConfiguration,
}
}
@@ -18779,6 +23339,22 @@ func ConnectionStateType_Values() []string {
}
}
+const (
+ // CustomKeyStoreTypeAwsCloudhsm is a CustomKeyStoreType enum value
+ CustomKeyStoreTypeAwsCloudhsm = "AWS_CLOUDHSM"
+
+ // CustomKeyStoreTypeExternalKeyStore is a CustomKeyStoreType enum value
+ CustomKeyStoreTypeExternalKeyStore = "EXTERNAL_KEY_STORE"
+)
+
+// CustomKeyStoreType_Values returns all elements of the CustomKeyStoreType enum
+func CustomKeyStoreType_Values() []string {
+ return []string{
+ CustomKeyStoreTypeAwsCloudhsm,
+ CustomKeyStoreTypeExternalKeyStore,
+ }
+}
+
const (
// CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value
CustomerMasterKeySpecRsa2048 = "RSA_2048"
@@ -18983,6 +23559,9 @@ const (
// GrantOperationVerifyMac is a GrantOperation enum value
GrantOperationVerifyMac = "VerifyMac"
+
+ // GrantOperationDeriveSharedSecret is a GrantOperation enum value
+ GrantOperationDeriveSharedSecret = "DeriveSharedSecret"
)
// GrantOperation_Values returns all elements of the GrantOperation enum
@@ -19004,6 +23583,31 @@ func GrantOperation_Values() []string {
GrantOperationGenerateDataKeyPairWithoutPlaintext,
GrantOperationGenerateMac,
GrantOperationVerifyMac,
+ GrantOperationDeriveSharedSecret,
+ }
+}
+
+const (
+ // KeyAgreementAlgorithmSpecEcdh is a KeyAgreementAlgorithmSpec enum value
+ KeyAgreementAlgorithmSpecEcdh = "ECDH"
+)
+
+// KeyAgreementAlgorithmSpec_Values returns all elements of the KeyAgreementAlgorithmSpec enum
+func KeyAgreementAlgorithmSpec_Values() []string {
+ return []string{
+ KeyAgreementAlgorithmSpecEcdh,
+ }
+}
+
+const (
+ // KeyEncryptionMechanismRsaesOaepSha256 is a KeyEncryptionMechanism enum value
+ KeyEncryptionMechanismRsaesOaepSha256 = "RSAES_OAEP_SHA_256"
+)
+
+// KeyEncryptionMechanism_Values returns all elements of the KeyEncryptionMechanism enum
+func KeyEncryptionMechanism_Values() []string {
+ return []string{
+ KeyEncryptionMechanismRsaesOaepSha256,
}
}
@@ -19132,6 +23736,9 @@ const (
// KeyUsageTypeGenerateVerifyMac is a KeyUsageType enum value
KeyUsageTypeGenerateVerifyMac = "GENERATE_VERIFY_MAC"
+
+ // KeyUsageTypeKeyAgreement is a KeyUsageType enum value
+ KeyUsageTypeKeyAgreement = "KEY_AGREEMENT"
)
// KeyUsageType_Values returns all elements of the KeyUsageType enum
@@ -19140,6 +23747,7 @@ func KeyUsageType_Values() []string {
KeyUsageTypeSignVerify,
KeyUsageTypeEncryptDecrypt,
KeyUsageTypeGenerateVerifyMac,
+ KeyUsageTypeKeyAgreement,
}
}
@@ -19208,6 +23816,9 @@ const (
// OriginTypeAwsCloudhsm is a OriginType enum value
OriginTypeAwsCloudhsm = "AWS_CLOUDHSM"
+
+ // OriginTypeExternalKeyStore is a OriginType enum value
+ OriginTypeExternalKeyStore = "EXTERNAL_KEY_STORE"
)
// OriginType_Values returns all elements of the OriginType enum
@@ -19216,6 +23827,23 @@ func OriginType_Values() []string {
OriginTypeAwsKms,
OriginTypeExternal,
OriginTypeAwsCloudhsm,
+ OriginTypeExternalKeyStore,
+ }
+}
+
+const (
+ // RotationTypeAutomatic is a RotationType enum value
+ RotationTypeAutomatic = "AUTOMATIC"
+
+ // RotationTypeOnDemand is a RotationType enum value
+ RotationTypeOnDemand = "ON_DEMAND"
+)
+
+// RotationType_Values returns all elements of the RotationType enum
+func RotationType_Values() []string {
+ return []string{
+ RotationTypeAutomatic,
+ RotationTypeOnDemand,
}
}
@@ -19270,11 +23898,39 @@ func SigningAlgorithmSpec_Values() []string {
const (
// WrappingKeySpecRsa2048 is a WrappingKeySpec enum value
WrappingKeySpecRsa2048 = "RSA_2048"
+
+ // WrappingKeySpecRsa3072 is a WrappingKeySpec enum value
+ WrappingKeySpecRsa3072 = "RSA_3072"
+
+ // WrappingKeySpecRsa4096 is a WrappingKeySpec enum value
+ WrappingKeySpecRsa4096 = "RSA_4096"
+
+ // WrappingKeySpecSm2 is a WrappingKeySpec enum value
+ WrappingKeySpecSm2 = "SM2"
)
// WrappingKeySpec_Values returns all elements of the WrappingKeySpec enum
func WrappingKeySpec_Values() []string {
return []string{
WrappingKeySpecRsa2048,
+ WrappingKeySpecRsa3072,
+ WrappingKeySpecRsa4096,
+ WrappingKeySpecSm2,
+ }
+}
+
+const (
+ // XksProxyConnectivityTypePublicEndpoint is a XksProxyConnectivityType enum value
+ XksProxyConnectivityTypePublicEndpoint = "PUBLIC_ENDPOINT"
+
+ // XksProxyConnectivityTypeVpcEndpointService is a XksProxyConnectivityType enum value
+ XksProxyConnectivityTypeVpcEndpointService = "VPC_ENDPOINT_SERVICE"
+)
+
+// XksProxyConnectivityType_Values returns all elements of the XksProxyConnectivityType enum
+func XksProxyConnectivityType_Values() []string {
+ return []string{
+ XksProxyConnectivityTypePublicEndpoint,
+ XksProxyConnectivityTypeVpcEndpointService,
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
index d926e08e6..babb91fc8 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go
@@ -8,7 +8,7 @@
// For general information about KMS, see the Key Management Service Developer
// Guide (https://docs.aws.amazon.com/kms/latest/developerguide/).
//
-// KMS is replacing the term customer master key (CMK) with KMS key and KMS
+// KMS has replaced the term customer master key (CMK) with KMS key and KMS
// key. The concept has not changed. To prevent breaking changes, KMS is keeping
// some variations of this term.
//
@@ -38,14 +38,14 @@
//
// # Signing Requests
//
-// Requests must be signed by using an access key ID and a secret access key.
-// We strongly recommend that you do not use your Amazon Web Services account
-// (root) access key ID and secret key for everyday work with KMS. Instead,
-// use the access key ID and secret access key for an IAM user. You can also
-// use the Amazon Web Services Security Token Service to generate temporary
-// security credentials that you can use to sign requests.
+// Requests must be signed using an access key ID and a secret access key. We
+// strongly recommend that you do not use your Amazon Web Services account root
+// access key ID and secret access key for everyday work. You can use the access
+// key ID and secret access key for an IAM user or you can use the Security
+// Token Service (STS) to generate temporary security credentials and use those
+// to sign requests.
//
-// All KMS operations require Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+// All KMS requests must be signed with Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
//
// # Logging API Requests
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
index 4f8fc2104..993fd2386 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
@@ -19,12 +19,13 @@ const (
// "CloudHsmClusterInUseException".
//
// The request was rejected because the specified CloudHSM cluster is already
- // associated with a custom key store or it shares a backup history with a cluster
- // that is associated with a custom key store. Each custom key store must be
- // associated with a different CloudHSM cluster.
+ // associated with an CloudHSM key store in the account, or it shares a backup
+ // history with an CloudHSM key store in the account. Each CloudHSM key store
+ // in the account must be associated with a different CloudHSM cluster.
//
- // Clusters that share a backup history have the same cluster certificate. To
- // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // CloudHSM clusters that share a backup history have the same cluster certificate.
+ // To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
ErrCodeCloudHsmClusterInUseException = "CloudHsmClusterInUseException"
@@ -32,29 +33,29 @@ const (
// "CloudHsmClusterInvalidConfigurationException".
//
// The request was rejected because the associated CloudHSM cluster did not
- // meet the configuration requirements for a custom key store.
+ // meet the configuration requirements for an CloudHSM key store.
//
- // * The cluster must be configured with private subnets in at least two
- // different Availability Zones in the Region.
+ // * The CloudHSM cluster must be configured with private subnets in at least
+ // two different Availability Zones in the Region.
//
// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
// (cloudhsm-cluster--sg) must include inbound rules and outbound
// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
// rules and the Destination in the outbound rules must match the security
- // group ID. These rules are set by default when you create the cluster.
- // Do not delete or change them. To get information about a particular security
- // group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
+ // group ID. These rules are set by default when you create the CloudHSM
+ // cluster. Do not delete or change them. To get information about a particular
+ // security group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
// operation.
//
- // * The cluster must contain at least as many HSMs as the operation requires.
- // To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
+ // * The CloudHSM cluster must contain at least as many HSMs as the operation
+ // requires. To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
// operations, the CloudHSM cluster must have at least two active HSMs, each
// in a different Availability Zone. For the ConnectCustomKeyStore operation,
// the CloudHSM must contain at least one active HSM.
//
// For information about the requirements for an CloudHSM cluster that is associated
- // with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+ // with an CloudHSM key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
// in the Key Management Service Developer Guide. For information about creating
// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
// in the CloudHSM User Guide. For information about cluster security groups,
@@ -65,10 +66,9 @@ const (
// ErrCodeCloudHsmClusterNotActiveException for service response error code
// "CloudHsmClusterNotActiveException".
//
- // The request was rejected because the CloudHSM cluster that is associated
- // with the custom key store is not active. Initialize and activate the cluster
- // and try the command again. For detailed instructions, see Getting Started
- // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
+ // The request was rejected because the CloudHSM cluster associated with the
+ // CloudHSM key store is not active. Initialize and activate the cluster and
+ // try the command again. For detailed instructions, see Getting Started (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
// in the CloudHSM User Guide.
ErrCodeCloudHsmClusterNotActiveException = "CloudHsmClusterNotActiveException"
@@ -84,18 +84,26 @@ const (
//
// The request was rejected because the specified CloudHSM cluster has a different
// cluster certificate than the original cluster. You cannot use the operation
- // to specify an unrelated cluster.
+ // to specify an unrelated cluster for an CloudHSM key store.
//
- // Specify a cluster that shares a backup history with the original cluster.
- // This includes clusters that were created from a backup of the current cluster,
- // and clusters that were created from the same backup that produced the current
- // cluster.
+ // Specify an CloudHSM cluster that shares a backup history with the original
+ // cluster. This includes clusters that were created from a backup of the current
+ // cluster, and clusters that were created from the same backup that produced
+ // the current cluster.
//
- // Clusters that share a backup history have the same cluster certificate. To
- // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
+ // CloudHSM clusters that share a backup history have the same cluster certificate.
+ // To view the cluster certificate of an CloudHSM cluster, use the DescribeClusters
+ // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
// operation.
ErrCodeCloudHsmClusterNotRelatedException = "CloudHsmClusterNotRelatedException"
+ // ErrCodeConflictException for service response error code
+ // "ConflictException".
+ //
+ // The request was rejected because an automatic rotation of this key is currently
+ // in progress or scheduled to begin within the next 20 minutes.
+ ErrCodeConflictException = "ConflictException"
+
// ErrCodeCustomKeyStoreHasCMKsException for service response error code
// "CustomKeyStoreHasCMKsException".
//
@@ -114,17 +122,27 @@ const (
//
// This exception is thrown under the following conditions:
//
- // * You requested the CreateKey or GenerateRandom operation in a custom
- // key store that is not connected. These operations are valid only when
- // the custom key store ConnectionState is CONNECTED.
+ // * You requested the ConnectCustomKeyStore operation on a custom key store
+ // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
+ // for all other ConnectionState values. To reconnect a custom key store
+ // in a FAILED state, disconnect it (DisconnectCustomKeyStore), then connect
+ // it (ConnectCustomKeyStore).
+ //
+ // * You requested the CreateKey operation in a custom key store that is
+ // not connected. This operations is valid only when the custom key store
+ // ConnectionState is CONNECTED.
+ //
+ // * You requested the DisconnectCustomKeyStore operation on a custom key
+ // store with a ConnectionState of DISCONNECTING or DISCONNECTED. This operation
+ // is valid for all other ConnectionState values.
//
// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
// on a custom key store that is not disconnected. This operation is valid
// only when the custom key store ConnectionState is DISCONNECTED.
//
- // * You requested the ConnectCustomKeyStore operation on a custom key store
- // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
- // for all other ConnectionState values.
+ // * You requested the GenerateRandom operation in an CloudHSM key store
+ // that is not connected. This operation is valid only when the CloudHSM
+ // key store ConnectionState is CONNECTED.
ErrCodeCustomKeyStoreInvalidStateException = "CustomKeyStoreInvalidStateException"
// ErrCodeCustomKeyStoreNameInUseException for service response error code
@@ -145,8 +163,8 @@ const (
// ErrCodeDependencyTimeoutException for service response error code
// "DependencyTimeoutException".
//
- // The system timed out while trying to fulfill the request. The request can
- // be retried.
+ // The system timed out while trying to fulfill the request. You can retry the
+ // request.
ErrCodeDependencyTimeoutException = "DependencyTimeoutException"
// ErrCodeDisabledException for service response error code
@@ -155,6 +173,12 @@ const (
// The request was rejected because the specified KMS key is not enabled.
ErrCodeDisabledException = "DisabledException"
+ // ErrCodeDryRunOperationException for service response error code
+ // "DryRunOperationException".
+ //
+ // The request was rejected because the DryRun parameter was specified.
+ ErrCodeDryRunOperationException = "DryRunOperationException"
+
// ErrCodeExpiredImportTokenException for service response error code
// "ExpiredImportTokenException".
//
@@ -183,9 +207,10 @@ const (
// "IncorrectTrustAnchorException".
//
// The request was rejected because the trust anchor certificate in the request
- // is not the trust anchor certificate for the specified CloudHSM cluster.
+ // to create an CloudHSM key store is not the trust anchor certificate for the
+ // specified CloudHSM cluster.
//
- // When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
+ // When you initialize the CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
// you create the trust anchor certificate and save it in the customerCA.crt
// file.
ErrCodeIncorrectTrustAnchorException = "IncorrectTrustAnchorException"
@@ -254,7 +279,8 @@ const (
// For encrypting, decrypting, re-encrypting, and generating data keys, the
// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
- // codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
+ // codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. For deriving key
+ // agreement secrets, the KeyUsage must be KEY_AGREEMENT. To find the KeyUsage
// of a KMS key, use the DescribeKey operation.
//
// To find the encryption or signing algorithms supported for a particular KMS
@@ -274,9 +300,17 @@ const (
// The request was rejected because the state of the specified resource is not
// valid for this request.
//
- // For more information about how key state affects the use of a KMS key, see
- // Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
- // in the Key Management Service Developer Guide .
+ // This exceptions means one of the following:
+ //
+ // * The key state of the KMS key is not compatible with the operation. To
+ // find the key state, use the DescribeKey operation. For more information
+ // about which key states are compatible with each KMS operation, see Key
+ // states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
+ // in the Key Management Service Developer Guide .
+ //
+ // * For cryptographic operations on KMS keys in custom key stores, this
+ // exception represents a general failure with many possible causes. To identify
+ // the cause, see the error message that accompanies the exception.
ErrCodeInvalidStateException = "KMSInvalidStateException"
// ErrCodeKMSInvalidMacException for service response error code
@@ -336,41 +370,173 @@ const (
// The request was rejected because a specified parameter is not supported or
// a specified resource is not valid for this operation.
ErrCodeUnsupportedOperationException = "UnsupportedOperationException"
+
+ // ErrCodeXksKeyAlreadyInUseException for service response error code
+ // "XksKeyAlreadyInUseException".
+ //
+ // The request was rejected because the (XksKeyId) is already associated with
+ // another KMS key in this external key store. Each KMS key in an external key
+ // store must be associated with a different external key.
+ ErrCodeXksKeyAlreadyInUseException = "XksKeyAlreadyInUseException"
+
+ // ErrCodeXksKeyInvalidConfigurationException for service response error code
+ // "XksKeyInvalidConfigurationException".
+ //
+ // The request was rejected because the external key specified by the XksKeyId
+ // parameter did not meet the configuration requirements for an external key
+ // store.
+ //
+ // The external key must be an AES-256 symmetric key that is enabled and performs
+ // encryption and decryption.
+ ErrCodeXksKeyInvalidConfigurationException = "XksKeyInvalidConfigurationException"
+
+ // ErrCodeXksKeyNotFoundException for service response error code
+ // "XksKeyNotFoundException".
+ //
+ // The request was rejected because the external key store proxy could not find
+ // the external key. This exception is thrown when the value of the XksKeyId
+ // parameter doesn't identify a key in the external key manager associated with
+ // the external key proxy.
+ //
+ // Verify that the XksKeyId represents an existing key in the external key manager.
+ // Use the key identifier that the external key store proxy uses to identify
+ // the key. For details, see the documentation provided with your external key
+ // store proxy or key manager.
+ ErrCodeXksKeyNotFoundException = "XksKeyNotFoundException"
+
+ // ErrCodeXksProxyIncorrectAuthenticationCredentialException for service response error code
+ // "XksProxyIncorrectAuthenticationCredentialException".
+ //
+ // The request was rejected because the proxy credentials failed to authenticate
+ // to the specified external key store proxy. The specified external key store
+ // proxy rejected a status request from KMS due to invalid credentials. This
+ // can indicate an error in the credentials or in the identification of the
+ // external key store proxy.
+ ErrCodeXksProxyIncorrectAuthenticationCredentialException = "XksProxyIncorrectAuthenticationCredentialException"
+
+ // ErrCodeXksProxyInvalidConfigurationException for service response error code
+ // "XksProxyInvalidConfigurationException".
+ //
+ // The request was rejected because the external key store proxy is not configured
+ // correctly. To identify the cause, see the error message that accompanies
+ // the exception.
+ ErrCodeXksProxyInvalidConfigurationException = "XksProxyInvalidConfigurationException"
+
+ // ErrCodeXksProxyInvalidResponseException for service response error code
+ // "XksProxyInvalidResponseException".
+ //
+ //
+ // KMS cannot interpret the response it received from the external key store
+ // proxy. The problem might be a poorly constructed response, but it could also
+ // be a transient network issue. If you see this error repeatedly, report it
+ // to the proxy vendor.
+ ErrCodeXksProxyInvalidResponseException = "XksProxyInvalidResponseException"
+
+ // ErrCodeXksProxyUriEndpointInUseException for service response error code
+ // "XksProxyUriEndpointInUseException".
+ //
+ // The request was rejected because the XksProxyUriEndpoint is already associated
+ // with another external key store in this Amazon Web Services Region. To identify
+ // the cause, see the error message that accompanies the exception.
+ ErrCodeXksProxyUriEndpointInUseException = "XksProxyUriEndpointInUseException"
+
+ // ErrCodeXksProxyUriInUseException for service response error code
+ // "XksProxyUriInUseException".
+ //
+ // The request was rejected because the concatenation of the XksProxyUriEndpoint
+ // and XksProxyUriPath is already associated with another external key store
+ // in this Amazon Web Services Region. Each external key store in a Region must
+ // use a unique external key store proxy API address.
+ ErrCodeXksProxyUriInUseException = "XksProxyUriInUseException"
+
+ // ErrCodeXksProxyUriUnreachableException for service response error code
+ // "XksProxyUriUnreachableException".
+ //
+ // KMS was unable to reach the specified XksProxyUriPath. The path must be reachable
+ // before you create the external key store or update its settings.
+ //
+ // This exception is also thrown when the external key store proxy response
+ // to a GetHealthStatus request indicates that all external key manager instances
+ // are unavailable.
+ ErrCodeXksProxyUriUnreachableException = "XksProxyUriUnreachableException"
+
+ // ErrCodeXksProxyVpcEndpointServiceInUseException for service response error code
+ // "XksProxyVpcEndpointServiceInUseException".
+ //
+ // The request was rejected because the specified Amazon VPC endpoint service
+ // is already associated with another external key store in this Amazon Web
+ // Services Region. Each external key store in a Region must use a different
+ // Amazon VPC endpoint service.
+ ErrCodeXksProxyVpcEndpointServiceInUseException = "XksProxyVpcEndpointServiceInUseException"
+
+ // ErrCodeXksProxyVpcEndpointServiceInvalidConfigurationException for service response error code
+ // "XksProxyVpcEndpointServiceInvalidConfigurationException".
+ //
+ // The request was rejected because the Amazon VPC endpoint service configuration
+ // does not fulfill the requirements for an external key store. To identify
+ // the cause, see the error message that accompanies the exception and review
+ // the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements)
+ // for Amazon VPC endpoint service connectivity for an external key store.
+ ErrCodeXksProxyVpcEndpointServiceInvalidConfigurationException = "XksProxyVpcEndpointServiceInvalidConfigurationException"
+
+ // ErrCodeXksProxyVpcEndpointServiceNotFoundException for service response error code
+ // "XksProxyVpcEndpointServiceNotFoundException".
+ //
+ // The request was rejected because KMS could not find the specified VPC endpoint
+ // service. Use DescribeCustomKeyStores to verify the VPC endpoint service name
+ // for the external key store. Also, confirm that the Allow principals list
+ // for the VPC endpoint service includes the KMS service principal for the Region,
+ // such as cks.kms.us-east-1.amazonaws.com.
+ ErrCodeXksProxyVpcEndpointServiceNotFoundException = "XksProxyVpcEndpointServiceNotFoundException"
)
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "AlreadyExistsException": newErrorAlreadyExistsException,
- "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException,
- "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException,
- "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException,
- "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException,
- "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException,
- "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException,
- "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException,
- "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException,
- "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException,
- "DependencyTimeoutException": newErrorDependencyTimeoutException,
- "DisabledException": newErrorDisabledException,
- "ExpiredImportTokenException": newErrorExpiredImportTokenException,
- "IncorrectKeyException": newErrorIncorrectKeyException,
- "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException,
- "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException,
- "KMSInternalException": newErrorInternalException,
- "InvalidAliasNameException": newErrorInvalidAliasNameException,
- "InvalidArnException": newErrorInvalidArnException,
- "InvalidCiphertextException": newErrorInvalidCiphertextException,
- "InvalidGrantIdException": newErrorInvalidGrantIdException,
- "InvalidGrantTokenException": newErrorInvalidGrantTokenException,
- "InvalidImportTokenException": newErrorInvalidImportTokenException,
- "InvalidKeyUsageException": newErrorInvalidKeyUsageException,
- "InvalidMarkerException": newErrorInvalidMarkerException,
- "KMSInvalidStateException": newErrorInvalidStateException,
- "KMSInvalidMacException": newErrorKMSInvalidMacException,
- "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException,
- "KeyUnavailableException": newErrorKeyUnavailableException,
- "LimitExceededException": newErrorLimitExceededException,
- "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException,
- "NotFoundException": newErrorNotFoundException,
- "TagException": newErrorTagException,
- "UnsupportedOperationException": newErrorUnsupportedOperationException,
+ "AlreadyExistsException": newErrorAlreadyExistsException,
+ "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException,
+ "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException,
+ "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException,
+ "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException,
+ "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException,
+ "ConflictException": newErrorConflictException,
+ "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException,
+ "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException,
+ "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException,
+ "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException,
+ "DependencyTimeoutException": newErrorDependencyTimeoutException,
+ "DisabledException": newErrorDisabledException,
+ "DryRunOperationException": newErrorDryRunOperationException,
+ "ExpiredImportTokenException": newErrorExpiredImportTokenException,
+ "IncorrectKeyException": newErrorIncorrectKeyException,
+ "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException,
+ "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException,
+ "KMSInternalException": newErrorInternalException,
+ "InvalidAliasNameException": newErrorInvalidAliasNameException,
+ "InvalidArnException": newErrorInvalidArnException,
+ "InvalidCiphertextException": newErrorInvalidCiphertextException,
+ "InvalidGrantIdException": newErrorInvalidGrantIdException,
+ "InvalidGrantTokenException": newErrorInvalidGrantTokenException,
+ "InvalidImportTokenException": newErrorInvalidImportTokenException,
+ "InvalidKeyUsageException": newErrorInvalidKeyUsageException,
+ "InvalidMarkerException": newErrorInvalidMarkerException,
+ "KMSInvalidStateException": newErrorInvalidStateException,
+ "KMSInvalidMacException": newErrorKMSInvalidMacException,
+ "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException,
+ "KeyUnavailableException": newErrorKeyUnavailableException,
+ "LimitExceededException": newErrorLimitExceededException,
+ "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException,
+ "NotFoundException": newErrorNotFoundException,
+ "TagException": newErrorTagException,
+ "UnsupportedOperationException": newErrorUnsupportedOperationException,
+ "XksKeyAlreadyInUseException": newErrorXksKeyAlreadyInUseException,
+ "XksKeyInvalidConfigurationException": newErrorXksKeyInvalidConfigurationException,
+ "XksKeyNotFoundException": newErrorXksKeyNotFoundException,
+ "XksProxyIncorrectAuthenticationCredentialException": newErrorXksProxyIncorrectAuthenticationCredentialException,
+ "XksProxyInvalidConfigurationException": newErrorXksProxyInvalidConfigurationException,
+ "XksProxyInvalidResponseException": newErrorXksProxyInvalidResponseException,
+ "XksProxyUriEndpointInUseException": newErrorXksProxyUriEndpointInUseException,
+ "XksProxyUriInUseException": newErrorXksProxyUriInUseException,
+ "XksProxyUriUnreachableException": newErrorXksProxyUriUnreachableException,
+ "XksProxyVpcEndpointServiceInUseException": newErrorXksProxyVpcEndpointServiceInUseException,
+ "XksProxyVpcEndpointServiceInvalidConfigurationException": newErrorXksProxyVpcEndpointServiceInvalidConfigurationException,
+ "XksProxyVpcEndpointServiceNotFoundException": newErrorXksProxyVpcEndpointServiceNotFoundException,
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go
index d76a80495..9d2eb17ae 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go
@@ -88,7 +88,7 @@ func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ
// strictly as a string of characters.
//
// For more information about using tags with Amazon Elastic Compute Cloud (Amazon
-// EC2) instances, see Tagging your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
+// EC2) instances, see Tag your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -205,8 +205,7 @@ func (c *SSM) AssociateOpsItemRelatedItemRequest(input *AssociateOpsItemRelatedI
// The specified OpsItem ID doesn't exist. Verify the ID and try again.
//
// - OpsItemLimitExceededException
-// The request caused OpsItems to exceed one or more quotas. For information
-// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+// The request caused OpsItems to exceed one or more quotas.
//
// - OpsItemInvalidParameterException
// A specified parameter argument isn't valid. Verify the available arguments
@@ -215,6 +214,9 @@ func (c *SSM) AssociateOpsItemRelatedItemRequest(input *AssociateOpsItemRelatedI
// - OpsItemRelatedItemAlreadyExistsException
// The Amazon Resource Name (ARN) is already associated with the OpsItem.
//
+// - OpsItemConflictException
+// The specified OpsItem is in the process of being deleted.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/AssociateOpsItemRelatedItem
func (c *SSM) AssociateOpsItemRelatedItem(input *AssociateOpsItemRelatedItemInput) (*AssociateOpsItemRelatedItemOutput, error) {
req, out := c.AssociateOpsItemRelatedItemRequest(input)
@@ -304,14 +306,15 @@ func (c *SSM) CancelCommandRequest(input *CancelCommandInput) (req *request.Requ
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - DuplicateInstanceId
// You can't specify a managed node ID in more than one association.
@@ -477,7 +480,7 @@ func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *reques
// use the activation code and ID when installing SSM Agent on machines in your
// hybrid environment. For more information about requirements for managing
// on-premises machines using Systems Manager, see Setting up Amazon Web Services
-// Systems Manager for hybrid environments (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html)
+// Systems Manager for hybrid and multicloud environments (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises
@@ -609,19 +612,20 @@ func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *requ
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - UnsupportedPlatformType
// The document doesn't support the platform type of the given managed node
-// ID(s). For example, you sent an document for a Windows managed node to a
-// Linux node.
+// IDs. For example, you sent an document for a Windows managed node to a Linux
+// node.
//
// - InvalidOutputLocation
// The output location isn't valid or doesn't exist.
@@ -642,7 +646,7 @@ func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *requ
// TargetMap parameter isn't valid.
//
// - InvalidTag
-// The specified tag key or value is not valid.
+// The specified tag key or value isn't valid.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateAssociation
func (c *SSM) CreateAssociation(input *CreateAssociationInput) (*CreateAssociationOutput, error) {
@@ -742,14 +746,15 @@ func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput)
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidParameters
// You must specify values for all required parameters in the Amazon Web Services
@@ -764,8 +769,8 @@ func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput)
//
// - UnsupportedPlatformType
// The document doesn't support the platform type of the given managed node
-// ID(s). For example, you sent an document for a Windows managed node to a
-// Linux node.
+// IDs. For example, you sent an document for a Windows managed node to a Linux
+// node.
//
// - InvalidOutputLocation
// The output location isn't valid or doesn't exist.
@@ -1043,8 +1048,8 @@ func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Requ
// CreateOpsItem API operation for Amazon Simple Systems Manager (SSM).
//
// Creates a new OpsItem. You must have permission in Identity and Access Management
-// (IAM) to create a new OpsItem. For more information, see Getting started
-// with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
+// (IAM) to create a new OpsItem. For more information, see Set up OpsCenter
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setup.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Operations engineers and IT professionals use Amazon Web Services Systems
@@ -1069,13 +1074,17 @@ func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Requ
// The OpsItem already exists.
//
// - OpsItemLimitExceededException
-// The request caused OpsItems to exceed one or more quotas. For information
-// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+// The request caused OpsItems to exceed one or more quotas.
//
// - OpsItemInvalidParameterException
// A specified parameter argument isn't valid. Verify the available arguments
// and try again.
//
+// - OpsItemAccessDeniedException
+// You don't have permission to view OpsItems in the specified account. Verify
+// that your account is configured either as a Systems Manager delegated administrator
+// or that you are logged into the Organizations management account.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CreateOpsItem
func (c *SSM) CreateOpsItem(input *CreateOpsItemInput) (*CreateOpsItemOutput, error) {
req, out := c.CreateOpsItemRequest(input)
@@ -1575,14 +1584,15 @@ func (c *SSM) DeleteAssociationRequest(input *DeleteAssociationInput) (req *requ
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - TooManyUpdates
// There are concurrent updates for a resource that supports one update at a
@@ -1878,6 +1888,112 @@ func (c *SSM) DeleteMaintenanceWindowWithContext(ctx aws.Context, input *DeleteM
return out, req.Send()
}
+const opDeleteOpsItem = "DeleteOpsItem"
+
+// DeleteOpsItemRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteOpsItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteOpsItem for more information on using the DeleteOpsItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteOpsItemRequest method.
+// req, resp := client.DeleteOpsItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteOpsItem
+func (c *SSM) DeleteOpsItemRequest(input *DeleteOpsItemInput) (req *request.Request, output *DeleteOpsItemOutput) {
+ op := &request.Operation{
+ Name: opDeleteOpsItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteOpsItemInput{}
+ }
+
+ output = &DeleteOpsItemOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteOpsItem API operation for Amazon Simple Systems Manager (SSM).
+//
+// Delete an OpsItem. You must have permission in Identity and Access Management
+// (IAM) to delete an OpsItem.
+//
+// Note the following important information about this operation.
+//
+// - Deleting an OpsItem is irreversible. You can't restore a deleted OpsItem.
+//
+// - This operation uses an eventual consistency model, which means the system
+// can take a few minutes to complete this operation. If you delete an OpsItem
+// and immediately call, for example, GetOpsItem, the deleted OpsItem might
+// still appear in the response.
+//
+// - This operation is idempotent. The system doesn't throw an exception
+// if you repeatedly call this operation for the same OpsItem. If the first
+// call is successful, all additional calls return the same successful response
+// as the first call.
+//
+// - This operation doesn't support cross-account calls. A delegated administrator
+// or management account can't delete OpsItems in other accounts, even if
+// OpsCenter has been set up for cross-account administration. For more information
+// about cross-account administration, see Setting up OpsCenter to centrally
+// manage OpsItems across accounts (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setting-up-cross-account.html)
+// in the Systems Manager User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation DeleteOpsItem for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - OpsItemInvalidParameterException
+// A specified parameter argument isn't valid. Verify the available arguments
+// and try again.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteOpsItem
+func (c *SSM) DeleteOpsItem(input *DeleteOpsItemInput) (*DeleteOpsItemOutput, error) {
+ req, out := c.DeleteOpsItemRequest(input)
+ return out, req.Send()
+}
+
+// DeleteOpsItemWithContext is the same as DeleteOpsItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteOpsItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) DeleteOpsItemWithContext(ctx aws.Context, input *DeleteOpsItemInput, opts ...request.Option) (*DeleteOpsItemOutput, error) {
+ req, out := c.DeleteOpsItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDeleteOpsMetadata = "DeleteOpsMetadata"
// DeleteOpsMetadataRequest generates a "aws/request.Request" representing the
@@ -2298,6 +2414,118 @@ func (c *SSM) DeleteResourceDataSyncWithContext(ctx aws.Context, input *DeleteRe
return out, req.Send()
}
+const opDeleteResourcePolicy = "DeleteResourcePolicy"
+
+// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteResourcePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DeleteResourcePolicyRequest method.
+// req, resp := client.DeleteResourcePolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteResourcePolicy
+func (c *SSM) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteResourcePolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteResourcePolicyInput{}
+ }
+
+ output = &DeleteResourcePolicyOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteResourcePolicy API operation for Amazon Simple Systems Manager (SSM).
+//
+// Deletes a Systems Manager resource policy. A resource policy helps you to
+// define the IAM entity (for example, an Amazon Web Services account) that
+// can manage your Systems Manager resources. The following resources support
+// Systems Manager resource policies.
+//
+// - OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web
+// Services accounts to view and interact with OpsCenter operational work
+// items (OpsItems).
+//
+// - Parameter - The resource policy is used to share a parameter with other
+// accounts using Resource Access Manager (RAM). For more information about
+// cross-account sharing of parameters, see Working with shared parameters
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-shared-parameters.html)
+// in the Amazon Web Services Systems Manager User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation DeleteResourcePolicy for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - ResourcePolicyInvalidParameterException
+// One or more parameters specified for the call aren't valid. Verify the parameters
+// and their values and try again.
+//
+// - ResourcePolicyConflictException
+// The hash provided in the call doesn't match the stored hash. This exception
+// is thrown when trying to update an obsolete policy version or when multiple
+// requests to update a policy are sent.
+//
+// - ResourceNotFoundException
+// The specified parameter to be shared could not be found.
+//
+// - MalformedResourcePolicyDocumentException
+// The specified policy document is malformed or invalid, or excessive PutResourcePolicy
+// or DeleteResourcePolicy calls have been made.
+//
+// - ResourcePolicyNotFoundException
+// No policies with the specified policy ID and hash could be found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DeleteResourcePolicy
+func (c *SSM) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) {
+ req, out := c.DeleteResourcePolicyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteResourcePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) {
+ req, out := c.DeleteResourcePolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDeregisterManagedInstance = "DeregisterManagedInstance"
// DeregisterManagedInstanceRequest generates a "aws/request.Request" representing the
@@ -2360,14 +2588,15 @@ func (c *SSM) DeregisterManagedInstanceRequest(input *DeregisterManagedInstanceI
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InternalServerError
// An error occurred on the server side.
@@ -2876,14 +3105,15 @@ func (c *SSM) DescribeAssociationRequest(input *DescribeAssociationInput) (req *
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeAssociation
func (c *SSM) DescribeAssociation(input *DescribeAssociationInput) (*DescribeAssociationOutput, error) {
@@ -3540,6 +3770,9 @@ func (c *SSM) DescribeAvailablePatchesRequest(input *DescribeAvailablePatchesInp
//
// Lists all patches eligible to be included in a patch baseline.
//
+// Currently, DescribeAvailablePatches supports only the Amazon Linux 1, Amazon
+// Linux 2, and Windows Server operating systems.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -3855,7 +4088,7 @@ func (c *SSM) DescribeEffectiveInstanceAssociationsRequest(input *DescribeEffect
// DescribeEffectiveInstanceAssociations API operation for Amazon Simple Systems Manager (SSM).
//
-// All associations for the managed node(s).
+// All associations for the managed nodes.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3874,14 +4107,15 @@ func (c *SSM) DescribeEffectiveInstanceAssociationsRequest(input *DescribeEffect
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidNextToken
// The specified token isn't valid.
@@ -4162,7 +4396,7 @@ func (c *SSM) DescribeInstanceAssociationsStatusRequest(input *DescribeInstanceA
// DescribeInstanceAssociationsStatus API operation for Amazon Simple Systems Manager (SSM).
//
-// The status of the associations for the managed node(s).
+// The status of the associations for the managed nodes.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4181,14 +4415,15 @@ func (c *SSM) DescribeInstanceAssociationsStatusRequest(input *DescribeInstanceA
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidNextToken
// The specified token isn't valid.
@@ -4315,18 +4550,19 @@ func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformat
// DescribeInstanceInformation API operation for Amazon Simple Systems Manager (SSM).
//
-// Describes one or more of your managed nodes, including information about
-// the operating system platform, the version of SSM Agent installed on the
-// managed node, node status, and so on.
+// Provides information about one or more of your managed nodes, including the
+// operating system platform, SSM Agent version, association status, and IP
+// address. This operation does not return information for nodes that are either
+// Stopped or Terminated.
//
-// If you specify one or more managed node IDs, it returns information for those
-// managed nodes. If you don't specify node IDs, it returns information for
-// all your managed nodes. If you specify a node ID that isn't valid or a node
-// that you don't own, you receive an error.
+// If you specify one or more node IDs, the operation returns information for
+// those managed nodes. If you don't specify node IDs, it returns information
+// for all your managed nodes. If you specify a node ID that isn't valid or
+// a node that you don't own, you receive an error.
//
-// The IamRole field for this API operation is the Identity and Access Management
-// (IAM) role assigned to on-premises managed nodes. This call doesn't return
-// the IAM role for EC2 instances.
+// The IamRole field returned for this API operation is the Identity and Access
+// Management (IAM) role assigned to on-premises managed nodes. This operation
+// does not return the IAM role for EC2 instances.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -4345,14 +4581,15 @@ func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformat
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidNextToken
// The specified token isn't valid.
@@ -4788,14 +5025,15 @@ func (c *SSM) DescribeInstancePatchesRequest(input *DescribeInstancePatchesInput
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidFilter
// The filter name isn't valid. Verify the you entered the correct name and
@@ -4877,6 +5115,174 @@ func (c *SSM) DescribeInstancePatchesPagesWithContext(ctx aws.Context, input *De
return p.Err()
}
+const opDescribeInstanceProperties = "DescribeInstanceProperties"
+
+// DescribeInstancePropertiesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeInstanceProperties operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeInstanceProperties for more information on using the DescribeInstanceProperties
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the DescribeInstancePropertiesRequest method.
+// req, resp := client.DescribeInstancePropertiesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceProperties
+func (c *SSM) DescribeInstancePropertiesRequest(input *DescribeInstancePropertiesInput) (req *request.Request, output *DescribeInstancePropertiesOutput) {
+ op := &request.Operation{
+ Name: opDescribeInstanceProperties,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeInstancePropertiesInput{}
+ }
+
+ output = &DescribeInstancePropertiesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeInstanceProperties API operation for Amazon Simple Systems Manager (SSM).
+//
+// An API operation used by the Systems Manager console to display information
+// about Systems Manager managed nodes.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation DescribeInstanceProperties for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidNextToken
+// The specified token isn't valid.
+//
+// - InvalidFilterKey
+// The specified key isn't valid.
+//
+// - InvalidInstanceId
+// The following problems can cause this exception:
+//
+// - You don't have permission to access the managed node.
+//
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
+// Verify that SSM Agent is running.
+//
+// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
+// Agent.
+//
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
+//
+// - InvalidActivationId
+// The activation ID isn't valid. Verify the you entered the correct ActivationId
+// or ActivationCode and try again.
+//
+// - InvalidInstancePropertyFilterValue
+// The specified filter value isn't valid.
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - InvalidDocument
+// The specified SSM document doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeInstanceProperties
+func (c *SSM) DescribeInstanceProperties(input *DescribeInstancePropertiesInput) (*DescribeInstancePropertiesOutput, error) {
+ req, out := c.DescribeInstancePropertiesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeInstancePropertiesWithContext is the same as DescribeInstanceProperties with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeInstanceProperties for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) DescribeInstancePropertiesWithContext(ctx aws.Context, input *DescribeInstancePropertiesInput, opts ...request.Option) (*DescribeInstancePropertiesOutput, error) {
+ req, out := c.DescribeInstancePropertiesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeInstancePropertiesPages iterates over the pages of a DescribeInstanceProperties operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeInstanceProperties method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeInstanceProperties operation.
+// pageNum := 0
+// err := client.DescribeInstancePropertiesPages(params,
+// func(page *ssm.DescribeInstancePropertiesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *SSM) DescribeInstancePropertiesPages(input *DescribeInstancePropertiesInput, fn func(*DescribeInstancePropertiesOutput, bool) bool) error {
+ return c.DescribeInstancePropertiesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeInstancePropertiesPagesWithContext same as DescribeInstancePropertiesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) DescribeInstancePropertiesPagesWithContext(ctx aws.Context, input *DescribeInstancePropertiesInput, fn func(*DescribeInstancePropertiesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeInstancePropertiesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeInstancePropertiesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*DescribeInstancePropertiesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opDescribeInventoryDeletions = "DescribeInventoryDeletions"
// DescribeInventoryDeletionsRequest generates a "aws/request.Request" representing the
@@ -6204,14 +6610,14 @@ func (c *SSM) DescribeOpsItemsRequest(input *DescribeOpsItemsInput) (req *reques
// DescribeOpsItems API operation for Amazon Simple Systems Manager (SSM).
//
// Query a set of OpsItems. You must have permission in Identity and Access
-// Management (IAM) to query a list of OpsItems. For more information, see Getting
-// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
+// Management (IAM) to query a list of OpsItems. For more information, see Set
+// up OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setup.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Operations engineers and IT professionals use Amazon Web Services Systems
// Manager OpsCenter to view, investigate, and remediate operational issues
// impacting the performance and health of their Amazon Web Services resources.
-// For more information, see OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
+// For more information, see Amazon Web Services Systems Manager OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6347,7 +6753,9 @@ func (c *SSM) DescribeParametersRequest(input *DescribeParametersInput) (req *re
// DescribeParameters API operation for Amazon Simple Systems Manager (SSM).
//
-// Get information about a parameter.
+// Lists the parameters in your Amazon Web Services account or the parameters
+// shared with you when you enable the Shared (https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_DescribeParameters.html#systemsmanager-DescribeParameters-request-Shared)
+// option.
//
// Request results are returned on a best-effort basis. If you specify MaxResults
// in the request, the response includes information up to the limit specified.
@@ -7209,6 +7617,9 @@ func (c *SSM) DisassociateOpsItemRelatedItemRequest(input *DisassociateOpsItemRe
// A specified parameter argument isn't valid. Verify the available arguments
// and try again.
//
+// - OpsItemConflictException
+// The specified OpsItem is in the process of being deleted.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DisassociateOpsItemRelatedItem
func (c *SSM) DisassociateOpsItemRelatedItem(input *DisassociateOpsItemRelatedItemInput) (*DisassociateOpsItemRelatedItemOutput, error) {
req, out := c.DisassociateOpsItemRelatedItemRequest(input)
@@ -7487,14 +7898,15 @@ func (c *SSM) GetCommandInvocationRequest(input *GetCommandInvocationInput) (req
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidPluginName
// The plugin name isn't valid.
@@ -7762,7 +8174,7 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP
// Patching for applications released by Microsoft is only available on EC2
// instances and advanced instances. To patch applications released by Microsoft
// on on-premises servers and VMs, you must enable advanced instances. For more
-// information, see Enabling the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
+// information, see Turning on the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance
@@ -8662,13 +9074,13 @@ func (c *SSM) GetOpsItemRequest(input *GetOpsItemInput) (req *request.Request, o
//
// Get information about an OpsItem by using the ID. You must have permission
// in Identity and Access Management (IAM) to view information about an OpsItem.
-// For more information, see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
+// For more information, see Set up OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setup.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Operations engineers and IT professionals use Amazon Web Services Systems
// Manager OpsCenter to view, investigate, and remediate operational issues
// impacting the performance and health of their Amazon Web Services resources.
-// For more information, see OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
+// For more information, see Amazon Web Services Systems Manager OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -8686,6 +9098,11 @@ func (c *SSM) GetOpsItemRequest(input *GetOpsItemInput) (req *request.Request, o
// - OpsItemNotFoundException
// The specified OpsItem ID doesn't exist. Verify the ID and try again.
//
+// - OpsItemAccessDeniedException
+// You don't have permission to view OpsItems in the specified account. Verify
+// that your account is configured either as a Systems Manager delegated administrator
+// or that you are logged into the Organizations management account.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetOpsItem
func (c *SSM) GetOpsItem(input *GetOpsItemInput) (*GetOpsItemOutput, error) {
req, out := c.GetOpsItemRequest(input)
@@ -9608,6 +10025,149 @@ func (c *SSM) GetPatchBaselineForPatchGroupWithContext(ctx aws.Context, input *G
return out, req.Send()
}
+const opGetResourcePolicies = "GetResourcePolicies"
+
+// GetResourcePoliciesRequest generates a "aws/request.Request" representing the
+// client's request for the GetResourcePolicies operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetResourcePolicies for more information on using the GetResourcePolicies
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetResourcePoliciesRequest method.
+// req, resp := client.GetResourcePoliciesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetResourcePolicies
+func (c *SSM) GetResourcePoliciesRequest(input *GetResourcePoliciesInput) (req *request.Request, output *GetResourcePoliciesOutput) {
+ op := &request.Operation{
+ Name: opGetResourcePolicies,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &GetResourcePoliciesInput{}
+ }
+
+ output = &GetResourcePoliciesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetResourcePolicies API operation for Amazon Simple Systems Manager (SSM).
+//
+// Returns an array of the Policy object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation GetResourcePolicies for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - ResourcePolicyInvalidParameterException
+// One or more parameters specified for the call aren't valid. Verify the parameters
+// and their values and try again.
+//
+// - ResourceNotFoundException
+// The specified parameter to be shared could not be found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetResourcePolicies
+func (c *SSM) GetResourcePolicies(input *GetResourcePoliciesInput) (*GetResourcePoliciesOutput, error) {
+ req, out := c.GetResourcePoliciesRequest(input)
+ return out, req.Send()
+}
+
+// GetResourcePoliciesWithContext is the same as GetResourcePolicies with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetResourcePolicies for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) GetResourcePoliciesWithContext(ctx aws.Context, input *GetResourcePoliciesInput, opts ...request.Option) (*GetResourcePoliciesOutput, error) {
+ req, out := c.GetResourcePoliciesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// GetResourcePoliciesPages iterates over the pages of a GetResourcePolicies operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See GetResourcePolicies method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a GetResourcePolicies operation.
+// pageNum := 0
+// err := client.GetResourcePoliciesPages(params,
+// func(page *ssm.GetResourcePoliciesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *SSM) GetResourcePoliciesPages(input *GetResourcePoliciesInput, fn func(*GetResourcePoliciesOutput, bool) bool) error {
+ return c.GetResourcePoliciesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// GetResourcePoliciesPagesWithContext same as GetResourcePoliciesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) GetResourcePoliciesPagesWithContext(ctx aws.Context, input *GetResourcePoliciesInput, fn func(*GetResourcePoliciesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *GetResourcePoliciesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.GetResourcePoliciesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*GetResourcePoliciesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opGetServiceSetting = "GetServiceSetting"
// GetServiceSettingRequest generates a "aws/request.Request" representing the
@@ -10188,14 +10748,15 @@ func (c *SSM) ListCommandInvocationsRequest(input *ListCommandInvocationsInput)
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidFilterKey
// The specified key isn't valid.
@@ -10347,14 +10908,15 @@ func (c *SSM) ListCommandsRequest(input *ListCommandsInput) (req *request.Reques
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidFilterKey
// The specified key isn't valid.
@@ -11171,14 +11733,15 @@ func (c *SSM) ListInventoryEntriesRequest(input *ListInventoryEntriesInput) (req
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidTypeNameException
// The parameter type name isn't valid.
@@ -11281,8 +11844,7 @@ func (c *SSM) ListOpsItemEventsRequest(input *ListOpsItemEventsInput) (req *requ
// The specified OpsItem ID doesn't exist. Verify the ID and try again.
//
// - OpsItemLimitExceededException
-// The request caused OpsItems to exceed one or more quotas. For information
-// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+// The request caused OpsItems to exceed one or more quotas.
//
// - OpsItemInvalidParameterException
// A specified parameter argument isn't valid. Verify the available arguments
@@ -12074,8 +12636,8 @@ func (c *SSM) ModifyDocumentPermissionRequest(input *ModifyDocumentPermissionInp
//
// Shares a Amazon Web Services Systems Manager document (SSM document)publicly
// or privately. If you share a document privately, you must specify the Amazon
-// Web Services user account IDs for those people who can use the document.
-// If you share a document publicly, you must specify All as the account ID.
+// Web Services user IDs for those people who can use the document. If you share
+// a document publicly, you must specify All as the account ID.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -12097,10 +12659,15 @@ func (c *SSM) ModifyDocumentPermissionRequest(input *ModifyDocumentPermissionInp
// type.
//
// - DocumentPermissionLimit
-// The document can't be shared with more Amazon Web Services user accounts.
-// You can share a document with a maximum of 20 accounts. You can publicly
-// share up to five documents. If you need to increase this limit, contact Amazon
-// Web Services Support.
+// The document can't be shared with more Amazon Web Services accounts. You
+// can specify a maximum of 20 accounts per API operation to share a private
+// document.
+//
+// By default, you can share a private document with a maximum of 1,000 accounts
+// and publicly share up to five documents.
+//
+// If you need to increase the quota for privately or publicly shared Systems
+// Manager documents, contact Amazon Web Services Support.
//
// - DocumentLimitExceeded
// You can have at most 500 active SSM documents.
@@ -12333,14 +12900,15 @@ func (c *SSM) PutInventoryRequest(input *PutInventoryInput) (req *request.Reques
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidTypeNameException
// The parameter type name isn't valid.
@@ -12549,6 +13117,140 @@ func (c *SSM) PutParameterWithContext(ctx aws.Context, input *PutParameterInput,
return out, req.Send()
}
+const opPutResourcePolicy = "PutResourcePolicy"
+
+// PutResourcePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutResourcePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutResourcePolicy for more information on using the PutResourcePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the PutResourcePolicyRequest method.
+// req, resp := client.PutResourcePolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutResourcePolicy
+func (c *SSM) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) {
+ op := &request.Operation{
+ Name: opPutResourcePolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutResourcePolicyInput{}
+ }
+
+ output = &PutResourcePolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutResourcePolicy API operation for Amazon Simple Systems Manager (SSM).
+//
+// Creates or updates a Systems Manager resource policy. A resource policy helps
+// you to define the IAM entity (for example, an Amazon Web Services account)
+// that can manage your Systems Manager resources. The following resources support
+// Systems Manager resource policies.
+//
+// - OpsItemGroup - The resource policy for OpsItemGroup enables Amazon Web
+// Services accounts to view and interact with OpsCenter operational work
+// items (OpsItems).
+//
+// - Parameter - The resource policy is used to share a parameter with other
+// accounts using Resource Access Manager (RAM). To share a parameter, it
+// must be in the advanced parameter tier. For information about parameter
+// tiers, see Managing parameter tiers (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html).
+// For information about changing an existing standard parameter to an advanced
+// parameter, see Changing a standard parameter to an advanced parameter
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html#parameter-store-advanced-parameters-enabling).
+// To share a SecureString parameter, it must be encrypted with a customer
+// managed key, and you must share the key separately through Key Management
+// Service. Amazon Web Services managed keys cannot be shared. Parameters
+// encrypted with the default Amazon Web Services managed key can be updated
+// to use a customer managed key instead. For KMS key definitions, see KMS
+// concepts (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html)
+// in the Key Management Service Developer Guide. While you can share a parameter
+// using the Systems Manager PutResourcePolicy operation, we recommend using
+// Resource Access Manager (RAM) instead. This is because using PutResourcePolicy
+// requires the extra step of promoting the parameter to a standard RAM Resource
+// Share using the RAM PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html)
+// API operation. Otherwise, the parameter won't be returned by the Systems
+// Manager DescribeParameters (https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_DescribeParameters.html)
+// API operation using the --shared option. For more information, see Sharing
+// a parameter (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-shared-parameters.html#share)
+// in the Amazon Web Services Systems Manager User Guide
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Systems Manager (SSM)'s
+// API operation PutResourcePolicy for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - ResourcePolicyInvalidParameterException
+// One or more parameters specified for the call aren't valid. Verify the parameters
+// and their values and try again.
+//
+// - ResourcePolicyLimitExceededException
+// The PutResourcePolicy API action enforces two limits. A policy can't be greater
+// than 1024 bytes in size. And only one policy can be attached to OpsItemGroup.
+// Verify these limits and try again.
+//
+// - ResourcePolicyConflictException
+// The hash provided in the call doesn't match the stored hash. This exception
+// is thrown when trying to update an obsolete policy version or when multiple
+// requests to update a policy are sent.
+//
+// - ResourceNotFoundException
+// The specified parameter to be shared could not be found.
+//
+// - MalformedResourcePolicyDocumentException
+// The specified policy document is malformed or invalid, or excessive PutResourcePolicy
+// or DeleteResourcePolicy calls have been made.
+//
+// - ResourcePolicyNotFoundException
+// No policies with the specified policy ID and hash could be found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/PutResourcePolicy
+func (c *SSM) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) {
+ req, out := c.PutResourcePolicyRequest(input)
+ return out, req.Send()
+}
+
+// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutResourcePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSM) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) {
+ req, out := c.PutResourcePolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opRegisterDefaultPatchBaseline = "RegisterDefaultPatchBaseline"
// RegisterDefaultPatchBaselineRequest generates a "aws/request.Request" representing the
@@ -13393,14 +14095,15 @@ func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request,
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidDocument
// The specified SSM document doesn't exist.
@@ -13418,8 +14121,8 @@ func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request,
//
// - UnsupportedPlatformType
// The document doesn't support the platform type of the given managed node
-// ID(s). For example, you sent an document for a Windows managed node to a
-// Linux node.
+// IDs. For example, you sent an document for a Windows managed node to a Linux
+// node.
//
// - MaxDocumentSizeExceeded
// The size limit of a document is 64 KB.
@@ -13428,7 +14131,8 @@ func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request,
// The role name can't contain invalid characters. Also verify that you specified
// an IAM role for notifications that includes the required trust policy. For
// information about configuring the IAM role for Run Command notifications,
-// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html)
+// see Monitoring Systems Manager status changes using Amazon SNS notifications
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// - InvalidNotificationConfig
@@ -14158,13 +14862,12 @@ func (c *SSM) UpdateAssociationRequest(input *UpdateAssociationInput) (req *requ
// that you call the DescribeAssociation API operation and make a note of all
// optional parameters required for your UpdateAssociation call.
//
-// In order to call this API operation, your Identity and Access Management
-// (IAM) user account, group, or role must be configured with permission to
-// call the DescribeAssociation API operation. If you don't have permission
-// to call DescribeAssociation, then you receive the following error: An error
-// occurred (AccessDeniedException) when calling the UpdateAssociation operation:
-// User: isn't authorized to perform: ssm:DescribeAssociation on
-// resource:
+// In order to call this API operation, a user, group, or role must be granted
+// permission to call the DescribeAssociation API operation. If you don't have
+// permission to call DescribeAssociation, then you receive the following error:
+// An error occurred (AccessDeniedException) when calling the UpdateAssociation
+// operation: User: isn't authorized to perform: ssm:DescribeAssociation
+// on resource:
//
// When you update an association, the association immediately runs against
// the specified targets. You can add the ApplyOnlyAtCronInterval parameter
@@ -14314,14 +15017,15 @@ func (c *SSM) UpdateAssociationStatusRequest(input *UpdateAssociationStatusInput
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InvalidDocument
// The specified SSM document doesn't exist.
@@ -15034,14 +15738,15 @@ func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleI
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
//
// - InternalServerError
// An error occurred on the server side.
@@ -15113,14 +15818,14 @@ func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Requ
// UpdateOpsItem API operation for Amazon Simple Systems Manager (SSM).
//
// Edit or change an OpsItem. You must have permission in Identity and Access
-// Management (IAM) to update an OpsItem. For more information, see Getting
-// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html)
+// Management (IAM) to update an OpsItem. For more information, see Set up OpsCenter
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setup.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Operations engineers and IT professionals use Amazon Web Services Systems
// Manager OpsCenter to view, investigate, and remediate operational issues
// impacting the performance and health of their Amazon Web Services resources.
-// For more information, see OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
+// For more information, see Amazon Web Services Systems Manager OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -15142,13 +15847,20 @@ func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Requ
// The OpsItem already exists.
//
// - OpsItemLimitExceededException
-// The request caused OpsItems to exceed one or more quotas. For information
-// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+// The request caused OpsItems to exceed one or more quotas.
//
// - OpsItemInvalidParameterException
// A specified parameter argument isn't valid. Verify the available arguments
// and try again.
//
+// - OpsItemAccessDeniedException
+// You don't have permission to view OpsItems in the specified account. Verify
+// that your account is configured either as a Systems Manager delegated administrator
+// or that you are logged into the Organizations management account.
+//
+// - OpsItemConflictException
+// The specified OpsItem is in the process of being deleted.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/UpdateOpsItem
func (c *SSM) UpdateOpsItem(input *UpdateOpsItemInput) (*UpdateOpsItemOutput, error) {
req, out := c.UpdateOpsItemRequest(input)
@@ -15734,7 +16446,8 @@ type AddTagsToResourceInput struct {
// object with an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager
// has a ResourceID of either aws/ssm/MyGroup/appmanager or /aws/ssm/MyGroup/appmanager.
//
- // For the Document and Parameter values, use the name of the resource.
+ // For the Document and Parameter values, use the name of the resource. If you're
+ // tagging a shared document, you must use the full ARN of the document.
//
// ManagedInstance: mi-012345abcde
//
@@ -15849,6 +16562,175 @@ func (s AddTagsToResourceOutput) GoString() string {
return s.String()
}
+// A CloudWatch alarm you apply to an automation or command.
+type Alarm struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your CloudWatch alarm.
+ //
+ // Name is a required field
+ Name *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Alarm) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Alarm) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Alarm) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Alarm"}
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetName sets the Name field's value.
+func (s *Alarm) SetName(v string) *Alarm {
+ s.Name = &v
+ return s
+}
+
+// The details for the CloudWatch alarm you want to apply to an automation or
+// command.
+type AlarmConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the CloudWatch alarm specified in the configuration.
+ //
+ // Alarms is a required field
+ Alarms []*Alarm `min:"1" type:"list" required:"true"`
+
+ // When this value is true, your automation or command continues to run in cases
+ // where we can’t retrieve alarm status information from CloudWatch. In cases
+ // where we successfully retrieve an alarm status of OK or INSUFFICIENT_DATA,
+ // the automation or command continues to run, regardless of this value. Default
+ // is false.
+ IgnorePollAlarmFailure *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlarmConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlarmConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AlarmConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AlarmConfiguration"}
+ if s.Alarms == nil {
+ invalidParams.Add(request.NewErrParamRequired("Alarms"))
+ }
+ if s.Alarms != nil && len(s.Alarms) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Alarms", 1))
+ }
+ if s.Alarms != nil {
+ for i, v := range s.Alarms {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Alarms", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAlarms sets the Alarms field's value.
+func (s *AlarmConfiguration) SetAlarms(v []*Alarm) *AlarmConfiguration {
+ s.Alarms = v
+ return s
+}
+
+// SetIgnorePollAlarmFailure sets the IgnorePollAlarmFailure field's value.
+func (s *AlarmConfiguration) SetIgnorePollAlarmFailure(v bool) *AlarmConfiguration {
+ s.IgnorePollAlarmFailure = &v
+ return s
+}
+
+// The details about the state of your CloudWatch alarm.
+type AlarmStateInformation struct {
+ _ struct{} `type:"structure"`
+
+ // The name of your CloudWatch alarm.
+ //
+ // Name is a required field
+ Name *string `min:"1" type:"string" required:"true"`
+
+ // The state of your CloudWatch alarm.
+ //
+ // State is a required field
+ State *string `type:"string" required:"true" enum:"ExternalAlarmState"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlarmStateInformation) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AlarmStateInformation) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *AlarmStateInformation) SetName(v string) *AlarmStateInformation {
+ s.Name = &v
+ return s
+}
+
+// SetState sets the State field's value.
+func (s *AlarmStateInformation) SetState(v string) *AlarmStateInformation {
+ s.State = &v
+ return s
+}
+
// Error returned if an attempt is made to register a patch group with a patch
// baseline that is already registered with a different patch baseline.
type AlreadyExistsException struct {
@@ -16134,6 +17016,11 @@ type Association struct {
// form another account, you must set the document version to default.
DocumentVersion *string `type:"string"`
+ // The number of hours that an association can run on specified targets. After
+ // the resulting cutoff time passes, associations that are currently running
+ // are cancelled, and no pending executions are started on remaining targets.
+ Duration *int64 `min:"1" type:"integer"`
+
// The managed node ID.
InstanceId *string `type:"string"`
@@ -16205,6 +17092,12 @@ func (s *Association) SetDocumentVersion(v string) *Association {
return s
}
+// SetDuration sets the Duration field's value.
+func (s *Association) SetDuration(v int64) *Association {
+ s.Duration = &v
+ return s
+}
+
// SetInstanceId sets the InstanceId field's value.
func (s *Association) SetInstanceId(v string) *Association {
s.InstanceId = &v
@@ -16321,6 +17214,10 @@ func (s *AssociationAlreadyExists) RequestID() string {
type AssociationDescription struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// By default, when you create a new associations, the system runs it immediately
// after it is created and then according to the schedule you specified. Specify
// this option if you don't want an association to run immediately after you
@@ -16357,6 +17254,11 @@ type AssociationDescription struct {
// The document version.
DocumentVersion *string `type:"string"`
+ // The number of hours that an association can run on specified targets. After
+ // the resulting cutoff time passes, associations that are currently running
+ // are cancelled, and no pending executions are started on remaining targets.
+ Duration *int64 `min:"1" type:"integer"`
+
// The managed node ID.
InstanceId *string `type:"string"`
@@ -16444,6 +17346,9 @@ type AssociationDescription struct {
// The managed nodes targeted by the request.
Targets []*Target `type:"list"`
+
+ // The CloudWatch alarm that was invoked during the association.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
}
// String returns the string representation.
@@ -16464,6 +17369,12 @@ func (s AssociationDescription) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *AssociationDescription) SetAlarmConfiguration(v *AlarmConfiguration) *AssociationDescription {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value.
func (s *AssociationDescription) SetApplyOnlyAtCronInterval(v bool) *AssociationDescription {
s.ApplyOnlyAtCronInterval = &v
@@ -16518,6 +17429,12 @@ func (s *AssociationDescription) SetDocumentVersion(v string) *AssociationDescri
return s
}
+// SetDuration sets the Duration field's value.
+func (s *AssociationDescription) SetDuration(v int64) *AssociationDescription {
+ s.Duration = &v
+ return s
+}
+
// SetInstanceId sets the InstanceId field's value.
func (s *AssociationDescription) SetInstanceId(v string) *AssociationDescription {
s.InstanceId = &v
@@ -16620,6 +17537,12 @@ func (s *AssociationDescription) SetTargets(v []*Target) *AssociationDescription
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *AssociationDescription) SetTriggeredAlarms(v []*AlarmStateInformation) *AssociationDescription {
+ s.TriggeredAlarms = v
+ return s
+}
+
// The specified association doesn't exist.
type AssociationDoesNotExist struct {
_ struct{} `type:"structure"`
@@ -16688,6 +17611,10 @@ func (s *AssociationDoesNotExist) RequestID() string {
type AssociationExecution struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The association ID.
AssociationId *string `type:"string"`
@@ -16712,6 +17639,9 @@ type AssociationExecution struct {
// The status of the association execution.
Status *string `type:"string"`
+
+ // The CloudWatch alarms that were invoked by the association.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
}
// String returns the string representation.
@@ -16732,6 +17662,12 @@ func (s AssociationExecution) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *AssociationExecution) SetAlarmConfiguration(v *AlarmConfiguration) *AssociationExecution {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetAssociationId sets the AssociationId field's value.
func (s *AssociationExecution) SetAssociationId(v string) *AssociationExecution {
s.AssociationId = &v
@@ -16780,6 +17716,12 @@ func (s *AssociationExecution) SetStatus(v string) *AssociationExecution {
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *AssociationExecution) SetTriggeredAlarms(v []*AlarmStateInformation) *AssociationExecution {
+ s.TriggeredAlarms = v
+ return s
+}
+
// The specified execution ID doesn't exist. Verify the ID number and try again.
type AssociationExecutionDoesNotExist struct {
_ struct{} `type:"structure"`
@@ -17395,6 +18337,11 @@ type AssociationVersionInfo struct {
// used when the association version was created.
DocumentVersion *string `type:"string"`
+ // The number of hours that an association can run on specified targets. After
+ // the resulting cutoff time passes, associations that are currently running
+ // are cancelled, and no pending executions are started on remaining targets.
+ Duration *int64 `min:"1" type:"integer"`
+
// The maximum number of targets allowed to run the association at the same
// time. You can specify a number, for example 10, or a percentage of the target
// set, for example 10%. The default value is 100%, which means all targets
@@ -17536,6 +18483,12 @@ func (s *AssociationVersionInfo) SetDocumentVersion(v string) *AssociationVersio
return s
}
+// SetDuration sets the Duration field's value.
+func (s *AssociationVersionInfo) SetDuration(v int64) *AssociationVersionInfo {
+ s.Duration = &v
+ return s
+}
+
// SetMaxConcurrency sets the MaxConcurrency field's value.
func (s *AssociationVersionInfo) SetMaxConcurrency(v string) *AssociationVersionInfo {
s.MaxConcurrency = &v
@@ -18046,6 +18999,9 @@ func (s *AutomationDefinitionVersionNotFoundException) RequestID() string {
type AutomationExecution struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm applied to your automation.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The ID of a State Manager association used in the Automation operation.
AssociationId *string `type:"string"`
@@ -18151,6 +19107,12 @@ type AutomationExecution struct {
// The specified targets.
Targets []*Target `type:"list"`
+
+ // The CloudWatch alarm that was invoked by the automation.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
+
+ // Variables defined for the automation.
+ Variables map[string][]*string `min:"1" type:"map"`
}
// String returns the string representation.
@@ -18171,6 +19133,12 @@ func (s AutomationExecution) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *AutomationExecution) SetAlarmConfiguration(v *AlarmConfiguration) *AutomationExecution {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetAssociationId sets the AssociationId field's value.
func (s *AutomationExecution) SetAssociationId(v string) *AutomationExecution {
s.AssociationId = &v
@@ -18357,6 +19325,18 @@ func (s *AutomationExecution) SetTargets(v []*Target) *AutomationExecution {
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *AutomationExecution) SetTriggeredAlarms(v []*AlarmStateInformation) *AutomationExecution {
+ s.TriggeredAlarms = v
+ return s
+}
+
+// SetVariables sets the Variables field's value.
+func (s *AutomationExecution) SetVariables(v map[string][]*string) *AutomationExecution {
+ s.Variables = v
+ return s
+}
+
// A filter used to match specific automation executions. This is used to limit
// the scope of Automation execution information returned.
type AutomationExecutionFilter struct {
@@ -18492,6 +19472,9 @@ func (s *AutomationExecutionLimitExceededException) RequestID() string {
type AutomationExecutionMetadata struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm applied to your automation.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The ID of a State Manager association used in the Automation operation.
AssociationId *string `type:"string"`
@@ -18587,6 +19570,9 @@ type AutomationExecutionMetadata struct {
// The targets defined by the user when starting the automation.
Targets []*Target `type:"list"`
+
+ // The CloudWatch alarm that was invoked by the automation.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
}
// String returns the string representation.
@@ -18607,6 +19593,12 @@ func (s AutomationExecutionMetadata) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *AutomationExecutionMetadata) SetAlarmConfiguration(v *AlarmConfiguration) *AutomationExecutionMetadata {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetAssociationId sets the AssociationId field's value.
func (s *AutomationExecutionMetadata) SetAssociationId(v string) *AutomationExecutionMetadata {
s.AssociationId = &v
@@ -18775,6 +19767,12 @@ func (s *AutomationExecutionMetadata) SetTargets(v []*Target) *AutomationExecuti
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *AutomationExecutionMetadata) SetTriggeredAlarms(v []*AlarmStateInformation) *AutomationExecutionMetadata {
+ s.TriggeredAlarms = v
+ return s
+}
+
// There is no automation execution information for the requested automation
// execution ID.
type AutomationExecutionNotFoundException struct {
@@ -19283,6 +20281,9 @@ func (s *CloudWatchOutputConfig) SetCloudWatchOutputEnabled(v bool) *CloudWatchO
type Command struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm applied to your command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// Amazon CloudWatch Logs information where you want Amazon Web Services Systems
// Manager to send the command output.
CloudWatchOutputConfig *CloudWatchOutputConfig `type:"structure"`
@@ -19324,7 +20325,7 @@ type Command struct {
// The maximum number of managed nodes that are allowed to run the command at
// the same time. You can specify a number of managed nodes, such as 10, or
// a percentage of nodes, such as 10%. The default value is 50. For more information
- // about how to use MaxConcurrency, see Running commands using Systems Manager
+ // about how to use MaxConcurrency, see Amazon Web Services Systems Manager
// Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html)
// in the Amazon Web Services Systems Manager User Guide.
MaxConcurrency *string `min:"1" type:"string"`
@@ -19332,8 +20333,8 @@ type Command struct {
// The maximum number of errors allowed before the system stops sending the
// command to additional targets. You can specify a number of errors, such as
// 10, or a percentage or errors, such as 10%. The default value is 0. For more
- // information about how to use MaxErrors, see Running commands using Systems
- // Manager Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html)
+ // information about how to use MaxErrors, see Amazon Web Services Systems Manager
+ // Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html)
// in the Amazon Web Services Systems Manager User Guide.
MaxErrors *string `min:"1" type:"string"`
@@ -19421,6 +20422,9 @@ type Command struct {
// The TimeoutSeconds value specified for a command.
TimeoutSeconds *int64 `min:"30" type:"integer"`
+
+ // The CloudWatch alarm that was invoked by the command.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
}
// String returns the string representation.
@@ -19441,6 +20445,12 @@ func (s Command) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *Command) SetAlarmConfiguration(v *AlarmConfiguration) *Command {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value.
func (s *Command) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *Command {
s.CloudWatchOutputConfig = v
@@ -19585,6 +20595,12 @@ func (s *Command) SetTimeoutSeconds(v int64) *Command {
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *Command) SetTriggeredAlarms(v []*AlarmStateInformation) *Command {
+ s.TriggeredAlarms = v
+ return s
+}
+
// Describes a command filter.
//
// A managed node ID can't be specified when a command status is Pending because
@@ -20122,7 +21138,7 @@ func (s *CommandPlugin) SetStatusDetails(v string) *CommandPlugin {
// A summary of the call execution that includes an execution ID, the type of
// execution (for example, Command), and the date/time of the execution using
-// a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.
+// a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
type ComplianceExecutionSummary struct {
_ struct{} `type:"structure"`
@@ -20131,7 +21147,7 @@ type ComplianceExecutionSummary struct {
ExecutionId *string `type:"string"`
// The time the execution ran as a datetime object that is saved in the following
- // format: yyyy-MM-dd'T'HH:mm:ss'Z'.
+ // format: yyyy-MM-dd'T'HH:mm:ss'Z'
//
// ExecutionTime is a required field
ExecutionTime *time.Time `type:"timestamp" required:"true"`
@@ -20645,10 +21661,13 @@ type CreateActivationInput struct {
// The name of the Identity and Access Management (IAM) role that you want to
// assign to the managed node. This IAM role must provide AssumeRole permissions
// for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com.
- // For more information, see Create an IAM service role for a hybrid environment
- // (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html)
+ // For more information, see Create an IAM service role for a hybrid and multicloud
+ // environment (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html)
// in the Amazon Web Services Systems Manager User Guide.
//
+ // You can't specify an IAM service-linked role for this parameter. You must
+ // create a unique role.
+ //
// IamRole is a required field
IamRole *string `type:"string" required:"true"`
@@ -20926,6 +21945,10 @@ func (s *CreateAssociationBatchOutput) SetSuccessful(v []*AssociationDescription
type CreateAssociationBatchRequestEntry struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// By default, when you create a new associations, the system runs it immediately
// after it is created and then according to the schedule you specified. Specify
// this option if you don't want an association to run immediately after you
@@ -20952,6 +21975,22 @@ type CreateAssociationBatchRequestEntry struct {
// The document version.
DocumentVersion *string `type:"string"`
+ // The number of hours the association can run before it is canceled. Duration
+ // applies to associations that are currently running, and any pending and in
+ // progress commands on all targets. If a target was taken offline for the association
+ // to run, it is made available again immediately, without a reboot.
+ //
+ // The Duration parameter applies only when both these conditions are true:
+ //
+ // * The association for which you specify a duration is cancelable according
+ // to the parameters of the SSM command document or Automation runbook associated
+ // with this execution.
+ //
+ // * The command specifies the ApplyOnlyAtCronInterval (https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_CreateAssociationBatchRequestEntry.html#systemsmanager-Type-CreateAssociationBatchRequestEntry-ApplyOnlyAtCronInterval)
+ // parameter, which means that the association doesn't run immediately after
+ // it is created, but only according to the specified schedule.
+ Duration *int64 `min:"1" type:"integer"`
+
// The managed node ID.
//
// InstanceId has been deprecated. To specify a managed node ID for an association,
@@ -21078,6 +22117,9 @@ func (s *CreateAssociationBatchRequestEntry) Validate() error {
if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1))
}
+ if s.Duration != nil && *s.Duration < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Duration", 1))
+ }
if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 {
invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1))
}
@@ -21096,6 +22138,11 @@ func (s *CreateAssociationBatchRequestEntry) Validate() error {
if s.TargetLocations != nil && len(s.TargetLocations) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TargetLocations", 1))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.OutputLocation != nil {
if err := s.OutputLocation.Validate(); err != nil {
invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
@@ -21128,6 +22175,12 @@ func (s *CreateAssociationBatchRequestEntry) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *CreateAssociationBatchRequestEntry) SetAlarmConfiguration(v *AlarmConfiguration) *CreateAssociationBatchRequestEntry {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value.
func (s *CreateAssociationBatchRequestEntry) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationBatchRequestEntry {
s.ApplyOnlyAtCronInterval = &v
@@ -21164,6 +22217,12 @@ func (s *CreateAssociationBatchRequestEntry) SetDocumentVersion(v string) *Creat
return s
}
+// SetDuration sets the Duration field's value.
+func (s *CreateAssociationBatchRequestEntry) SetDuration(v int64) *CreateAssociationBatchRequestEntry {
+ s.Duration = &v
+ return s
+}
+
// SetInstanceId sets the InstanceId field's value.
func (s *CreateAssociationBatchRequestEntry) SetInstanceId(v string) *CreateAssociationBatchRequestEntry {
s.InstanceId = &v
@@ -21239,6 +22298,10 @@ func (s *CreateAssociationBatchRequestEntry) SetTargets(v []*Target) *CreateAsso
type CreateAssociationInput struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// By default, when you create a new association, the system runs it immediately
// after it is created and then according to the schedule you specified. Specify
// this option if you don't want an association to run immediately after you
@@ -21263,7 +22326,7 @@ type CreateAssociationInput struct {
// The severity level to assign to the association.
ComplianceSeverity *string `type:"string" enum:"AssociationComplianceSeverity"`
- // The document version you want to associate with the target(s). Can be a specific
+ // The document version you want to associate with the targets. Can be a specific
// version or the default version.
//
// State Manager doesn't support running associations that use a new version
@@ -21274,6 +22337,22 @@ type CreateAssociationInput struct {
// form another account, you must set the document version to default.
DocumentVersion *string `type:"string"`
+ // The number of hours the association can run before it is canceled. Duration
+ // applies to associations that are currently running, and any pending and in
+ // progress commands on all targets. If a target was taken offline for the association
+ // to run, it is made available again immediately, without a reboot.
+ //
+ // The Duration parameter applies only when both these conditions are true:
+ //
+ // * The association for which you specify a duration is cancelable according
+ // to the parameters of the SSM command document or Automation runbook associated
+ // with this execution.
+ //
+ // * The command specifies the ApplyOnlyAtCronInterval (https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_CreateAssociation.html#systemsmanager-CreateAssociation-request-ApplyOnlyAtCronInterval)
+ // parameter, which means that the association doesn't run immediately after
+ // it is created, but only according to the specified schedule.
+ Duration *int64 `min:"1" type:"integer"`
+
// The managed node ID.
//
// InstanceId has been deprecated. To specify a managed node ID for an association,
@@ -21315,7 +22394,7 @@ type CreateAssociationInput struct {
// the configuration information for the managed node.
//
// You can specify Amazon Web Services-predefined documents, documents you created,
- // or a document that is shared with you from another account.
+ // or a document that is shared with you from another Amazon Web Services account.
//
// For Systems Manager documents (SSM documents) that are shared with you from
// other Amazon Web Services accounts, you must specify the complete SSM document
@@ -21345,7 +22424,7 @@ type CreateAssociationInput struct {
// String and GoString methods.
Parameters map[string][]*string `type:"map" sensitive:"true"`
- // A cron expression when the association will be applied to the target(s).
+ // A cron expression when the association will be applied to the targets.
ScheduleExpression *string `min:"1" type:"string"`
// Number of days to wait after the scheduled day to run an association. For
@@ -21395,7 +22474,7 @@ type CreateAssociationInput struct {
// account, or individual managed node IDs. You can target all managed nodes
// in an Amazon Web Services account by specifying the InstanceIds key with
// a value of *. For more information about choosing targets for an association,
- // see Using targets and rate controls with State Manager associations (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-state-manager-targets-and-rate-controls.html)
+ // see About targets and rate controls in State Manager associations (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-state-manager-targets-and-rate-controls.html)
// in the Amazon Web Services Systems Manager User Guide.
Targets []*Target `type:"list"`
}
@@ -21424,6 +22503,9 @@ func (s *CreateAssociationInput) Validate() error {
if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1))
}
+ if s.Duration != nil && *s.Duration < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Duration", 1))
+ }
if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 {
invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1))
}
@@ -21442,6 +22524,11 @@ func (s *CreateAssociationInput) Validate() error {
if s.TargetLocations != nil && len(s.TargetLocations) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TargetLocations", 1))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.OutputLocation != nil {
if err := s.OutputLocation.Validate(); err != nil {
invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
@@ -21484,6 +22571,12 @@ func (s *CreateAssociationInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *CreateAssociationInput) SetAlarmConfiguration(v *AlarmConfiguration) *CreateAssociationInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value.
func (s *CreateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationInput {
s.ApplyOnlyAtCronInterval = &v
@@ -21520,6 +22613,12 @@ func (s *CreateAssociationInput) SetDocumentVersion(v string) *CreateAssociation
return s
}
+// SetDuration sets the Duration field's value.
+func (s *CreateAssociationInput) SetDuration(v int64) *CreateAssociationInput {
+ s.Duration = &v
+ return s
+}
+
// SetInstanceId sets the InstanceId field's value.
func (s *CreateAssociationInput) SetInstanceId(v string) *CreateAssociationInput {
s.InstanceId = &v
@@ -21635,18 +22734,20 @@ type CreateDocumentInput struct {
// A list of key-value pairs that describe attachments to a version of a document.
Attachments []*AttachmentsSource `type:"list"`
- // The content for the new SSM document in JSON or YAML format. We recommend
- // storing the contents for your new document in an external JSON or YAML file
- // and referencing the file in a command.
+ // The content for the new SSM document in JSON or YAML format. The content
+ // of the document must not exceed 64KB. This quota also includes the content
+ // specified for input parameters at runtime. We recommend storing the contents
+ // for your new document in an external JSON or YAML file and referencing the
+ // file in a command.
//
// For examples, see the following topics in the Amazon Web Services Systems
// Manager User Guide.
//
- // * Create an SSM document (Amazon Web Services API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html)
+ // * Create an SSM document (console) (https://docs.aws.amazon.com/systems-manager/latest/userguide/documents-using.html#create-ssm-console)
//
- // * Create an SSM document (Amazon Web Services CLI) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-cli.html)
+ // * Create an SSM document (command line) (https://docs.aws.amazon.com/systems-manager/latest/userguide/documents-using.html#create-ssm-document-cli)
//
- // * Create an SSM document (API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html)
+ // * Create an SSM document (API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/documents-using.html#create-ssm-document-api)
//
// Content is a required field
Content *string `min:"1" type:"string" required:"true"`
@@ -21677,6 +22778,12 @@ type CreateDocumentInput struct {
//
// * amzn
//
+ // * AWSEC2
+ //
+ // * AWSConfigRemediation
+ //
+ // * AWSSupport
+ //
// Name is a required field
Name *string `type:"string" required:"true"`
@@ -21950,6 +23057,9 @@ type CreateMaintenanceWindowInput struct {
// The date and time, in ISO-8601 Extended format, for when you want the maintenance
// window to become active. StartDate allows you to delay activation of the
// maintenance window until the specified future date.
+ //
+ // When using a rate schedule, if you provide a start date that occurs in the
+ // past, the current date and time are used as the start date.
StartDate *string `type:"string"`
// Optional metadata that you assign to a resource. Tags enable you to categorize
@@ -22146,6 +23256,12 @@ func (s *CreateMaintenanceWindowOutput) SetWindowId(v string) *CreateMaintenance
type CreateOpsItemInput struct {
_ struct{} `type:"structure"`
+ // The target Amazon Web Services account where you want to create an OpsItem.
+ // To make this call, your account must be configured to work with OpsItems
+ // across accounts. For more information, see Set up OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-setup.html)
+ // in the Amazon Web Services Systems Manager User Guide.
+ AccountId *string `type:"string"`
+
// The time a runbook workflow ended. Currently reported only for the OpsItem
// type /aws/changerequest.
ActualEndTime *time.Time `type:"timestamp"`
@@ -22157,7 +23273,11 @@ type CreateOpsItemInput struct {
// Specify a category to assign to an OpsItem.
Category *string `min:"1" type:"string"`
- // Information about the OpsItem.
+ // User-defined text that contains information about the OpsItem, in Markdown
+ // format.
+ //
+ // Provide enough information so that users viewing this OpsItem for the first
+ // time understand the issue.
//
// Description is a required field
Description *string `min:"1" type:"string" required:"true"`
@@ -22185,12 +23305,21 @@ type CreateOpsItemInput struct {
// Use the /aws/resources key in OperationalData to specify a related resource
// in the request. Use the /aws/automations key in OperationalData to associate
// an Automation runbook with the OpsItem. To view Amazon Web Services CLI example
- // commands that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems)
+ // commands that use these keys, see Create OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-manually-create-OpsItems.html)
// in the Amazon Web Services Systems Manager User Guide.
OperationalData map[string]*OpsItemDataValue `type:"map"`
- // The type of OpsItem to create. Currently, the only valid values are /aws/changerequest
- // and /aws/issue.
+ // The type of OpsItem to create. Systems Manager supports the following types
+ // of OpsItems:
+ //
+ // * /aws/issue This type of OpsItem is used for default OpsItems created
+ // by OpsCenter.
+ //
+ // * /aws/changerequest This type of OpsItem is used by Change Manager for
+ // reviewing and approving or rejecting change requests.
+ //
+ // * /aws/insight This type of OpsItem is used by OpsCenter for aggregating
+ // and reporting on duplicate OpsItems.
OpsItemType *string `type:"string"`
// The time specified in a change request for a runbook workflow to end. Currently
@@ -22219,10 +23348,7 @@ type CreateOpsItemInput struct {
// Source is a required field
Source *string `min:"1" type:"string" required:"true"`
- // Optional metadata that you assign to a resource. You can restrict access
- // to OpsItems by using an inline IAM policy that specifies tags. For more information,
- // see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html#OpsCenter-getting-started-user-permissions)
- // in the Amazon Web Services Systems Manager User Guide.
+ // Optional metadata that you assign to a resource.
//
// Tags use a key-value pair. For example:
//
@@ -22315,6 +23441,12 @@ func (s *CreateOpsItemInput) Validate() error {
return nil
}
+// SetAccountId sets the AccountId field's value.
+func (s *CreateOpsItemInput) SetAccountId(v string) *CreateOpsItemInput {
+ s.AccountId = &v
+ return s
+}
+
// SetActualEndTime sets the ActualEndTime field's value.
func (s *CreateOpsItemInput) SetActualEndTime(v time.Time) *CreateOpsItemInput {
s.ActualEndTime = &v
@@ -22408,6 +23540,9 @@ func (s *CreateOpsItemInput) SetTitle(v string) *CreateOpsItemInput {
type CreateOpsItemOutput struct {
_ struct{} `type:"structure"`
+ // The OpsItem Amazon Resource Name (ARN).
+ OpsItemArn *string `min:"20" type:"string"`
+
// The ID of the OpsItem.
OpsItemId *string `type:"string"`
}
@@ -22430,6 +23565,12 @@ func (s CreateOpsItemOutput) GoString() string {
return s.String()
}
+// SetOpsItemArn sets the OpsItemArn field's value.
+func (s *CreateOpsItemOutput) SetOpsItemArn(v string) *CreateOpsItemOutput {
+ s.OpsItemArn = &v
+ return s
+}
+
// SetOpsItemId sets the OpsItemId field's value.
func (s *CreateOpsItemOutput) SetOpsItemId(v string) *CreateOpsItemOutput {
s.OpsItemId = &v
@@ -22625,11 +23766,11 @@ type CreatePatchBaselineInput struct {
// with the patch baseline, and its status is reported as InstalledOther.
// This is the default action if no option is specified.
//
- // * BLOCK : Packages in the RejectedPatches list, and packages that include
- // them as dependencies, aren't installed under any circumstances. If a package
- // was installed before it was added to the Rejected patches list, it is
- // considered non-compliant with the patch baseline, and its status is reported
- // as InstalledRejected.
+ // * BLOCK: Packages in the Rejected patches list, and packages that include
+ // them as dependencies, aren't installed by Patch Manager under any circumstances.
+ // If a package was installed before it was added to the Rejected patches
+ // list, or is installed outside of Patch Manager afterward, it's considered
+ // noncompliant with the patch baseline and its status is reported as InstalledRejected.
RejectedPatchesAction *string `type:"string" enum:"PatchAction"`
// Information about the patches to use to update the managed nodes, including
@@ -23363,7 +24504,7 @@ type DeleteInventoryOutput struct {
DeletionId *string `type:"string"`
// A summary of the delete operation. For more information about this summary,
- // see Deleting custom inventory (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete-summary)
+ // see Understanding the delete inventory summary (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete-summary)
// in the Amazon Web Services Systems Manager User Guide.
DeletionSummary *InventoryDeletionSummary `type:"structure"`
@@ -23487,6 +24628,74 @@ func (s *DeleteMaintenanceWindowOutput) SetWindowId(v string) *DeleteMaintenance
return s
}
+type DeleteOpsItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the OpsItem that you want to delete.
+ //
+ // OpsItemId is a required field
+ OpsItemId *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteOpsItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteOpsItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteOpsItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteOpsItemInput"}
+ if s.OpsItemId == nil {
+ invalidParams.Add(request.NewErrParamRequired("OpsItemId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetOpsItemId sets the OpsItemId field's value.
+func (s *DeleteOpsItemInput) SetOpsItemId(v string) *DeleteOpsItemInput {
+ s.OpsItemId = &v
+ return s
+}
+
+type DeleteOpsItemOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteOpsItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteOpsItemOutput) GoString() string {
+ return s.String()
+}
+
type DeleteOpsMetadataInput struct {
_ struct{} `type:"structure"`
@@ -23563,6 +24772,9 @@ type DeleteParameterInput struct {
// The name of the parameter to delete.
//
+ // You can't enter the Amazon Resource Name (ARN) for a parameter, only the
+ // parameter name itself.
+ //
// Name is a required field
Name *string `min:"1" type:"string" required:"true"`
}
@@ -23635,6 +24847,9 @@ type DeleteParametersInput struct {
// The names of the parameters to delete. After deleting a parameter, wait for
// at least 30 seconds to create a parameter with the same name.
//
+ // You can't enter the Amazon Resource Name (ARN) for a parameter, only the
+ // parameter name itself.
+ //
// Names is a required field
Names []*string `min:"1" type:"list" required:"true"`
}
@@ -23883,6 +25098,106 @@ func (s DeleteResourceDataSyncOutput) GoString() string {
return s.String()
}
+type DeleteResourcePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // ID of the current policy version. The hash helps to prevent multiple calls
+ // from attempting to overwrite a policy.
+ //
+ // PolicyHash is a required field
+ PolicyHash *string `type:"string" required:"true"`
+
+ // The policy ID.
+ //
+ // PolicyId is a required field
+ PolicyId *string `type:"string" required:"true"`
+
+ // Amazon Resource Name (ARN) of the resource to which the policies are attached.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteResourcePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteResourcePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteResourcePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"}
+ if s.PolicyHash == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyHash"))
+ }
+ if s.PolicyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyId"))
+ }
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPolicyHash sets the PolicyHash field's value.
+func (s *DeleteResourcePolicyInput) SetPolicyHash(v string) *DeleteResourcePolicyInput {
+ s.PolicyHash = &v
+ return s
+}
+
+// SetPolicyId sets the PolicyId field's value.
+func (s *DeleteResourcePolicyInput) SetPolicyId(v string) *DeleteResourcePolicyInput {
+ s.PolicyId = &v
+ return s
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput {
+ s.ResourceArn = &v
+ return s
+}
+
+type DeleteResourcePolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteResourcePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteResourcePolicyOutput) GoString() string {
+ return s.String()
+}
+
type DeregisterManagedInstanceInput struct {
_ struct{} `type:"structure"`
@@ -25260,8 +26575,8 @@ type DescribeDocumentInput struct {
Name *string `type:"string" required:"true"`
// An optional field specifying the version of the artifact associated with
- // the document. For example, "Release 12, Update 6". This value is unique across
- // all versions of a document, and can't be changed.
+ // the document. For example, 12.6. This value is unique across all versions
+ // of a document, and can't be changed.
VersionName *string `type:"string"`
}
@@ -25819,8 +27134,9 @@ type DescribeInstanceInformationInput struct {
_ struct{} `type:"structure"`
// One or more filters. Use a filter to return a more specific list of managed
- // nodes. You can filter based on tags applied to your managed nodes. Use this
- // Filters data type instead of InstanceInformationFilterList, which is deprecated.
+ // nodes. You can filter based on tags applied to your managed nodes. Tag filters
+ // can't be combined with other filter types. Use this Filters data type instead
+ // of InstanceInformationFilterList, which is deprecated.
Filters []*InstanceInformationStringFilter `type:"list"`
// This is a legacy method. We recommend that you don't use this method. Instead,
@@ -25833,7 +27149,7 @@ type DescribeInstanceInformationInput struct {
// The maximum number of items to return for this call. The call also returns
// a token that you can specify in a subsequent call to get the next set of
- // results.
+ // results. The default value is 10 items.
MaxResults *int64 `min:"5" type:"integer"`
// The token for the next set of items to return. (You received this token from
@@ -26218,6 +27534,9 @@ type DescribeInstancePatchesInput struct {
// * Severity Sample values: Important | Medium | Low
//
// * State Sample values: Installed | InstalledOther | InstalledPendingReboot
+ // For lists of all State values, see Understanding patch compliance state
+ // values (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-compliance-states.html)
+ // in the Amazon Web Services Systems Manager User Guide.
Filters []*PatchOrchestratorFilter `type:"list"`
// The ID of the managed node whose patch state information should be retrieved.
@@ -26356,6 +27675,147 @@ func (s *DescribeInstancePatchesOutput) SetPatches(v []*PatchComplianceData) *De
return s
}
+type DescribeInstancePropertiesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The request filters to use with the operator.
+ FiltersWithOperator []*InstancePropertyStringFilter `min:"1" type:"list"`
+
+ // An array of instance property filters.
+ InstancePropertyFilterList []*InstancePropertyFilter `min:"1" type:"list"`
+
+ // The maximum number of items to return for the call. The call also returns
+ // a token that you can specify in a subsequent call to get the next set of
+ // results.
+ MaxResults *int64 `min:"5" type:"integer"`
+
+ // The token provided by a previous request to use to return the next set of
+ // properties.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeInstancePropertiesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeInstancePropertiesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeInstancePropertiesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeInstancePropertiesInput"}
+ if s.FiltersWithOperator != nil && len(s.FiltersWithOperator) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("FiltersWithOperator", 1))
+ }
+ if s.InstancePropertyFilterList != nil && len(s.InstancePropertyFilterList) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("InstancePropertyFilterList", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 5 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
+ }
+ if s.FiltersWithOperator != nil {
+ for i, v := range s.FiltersWithOperator {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FiltersWithOperator", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.InstancePropertyFilterList != nil {
+ for i, v := range s.InstancePropertyFilterList {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstancePropertyFilterList", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFiltersWithOperator sets the FiltersWithOperator field's value.
+func (s *DescribeInstancePropertiesInput) SetFiltersWithOperator(v []*InstancePropertyStringFilter) *DescribeInstancePropertiesInput {
+ s.FiltersWithOperator = v
+ return s
+}
+
+// SetInstancePropertyFilterList sets the InstancePropertyFilterList field's value.
+func (s *DescribeInstancePropertiesInput) SetInstancePropertyFilterList(v []*InstancePropertyFilter) *DescribeInstancePropertiesInput {
+ s.InstancePropertyFilterList = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeInstancePropertiesInput) SetMaxResults(v int64) *DescribeInstancePropertiesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeInstancePropertiesInput) SetNextToken(v string) *DescribeInstancePropertiesInput {
+ s.NextToken = &v
+ return s
+}
+
+type DescribeInstancePropertiesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Properties for the managed instances.
+ InstanceProperties []*InstanceProperty `type:"list"`
+
+ // The token for the next set of properties to return. Use this token to get
+ // the next set of results.
+ NextToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeInstancePropertiesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DescribeInstancePropertiesOutput) GoString() string {
+ return s.String()
+}
+
+// SetInstanceProperties sets the InstanceProperties field's value.
+func (s *DescribeInstancePropertiesOutput) SetInstanceProperties(v []*InstanceProperty) *DescribeInstancePropertiesOutput {
+ s.InstanceProperties = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeInstancePropertiesOutput) SetNextToken(v string) *DescribeInstancePropertiesOutput {
+ s.NextToken = &v
+ return s
+}
+
type DescribeInventoryDeletionsInput struct {
_ struct{} `type:"structure"`
@@ -27613,6 +29073,8 @@ type DescribeOpsItemsInput struct {
//
// * Key: AutomationId Operations: Equals
//
+ // * Key: AccountId Operations: Equals
+ //
// *The Equals operator for Title matches the first 100 characters. If you specify
// more than 100 characters, they system returns an error that the filter value
// exceeds the length limit.
@@ -27739,6 +29201,21 @@ type DescribeParametersInput struct {
// Filters to limit the request results.
ParameterFilters []*ParameterStringFilter `type:"list"`
+
+ // Lists parameters that are shared with you.
+ //
+ // By default when using this option, the command returns parameters that have
+ // been shared using a standard Resource Access Manager Resource Share. In order
+ // for a parameter that was shared using the PutResourcePolicy command to be
+ // returned, the associated RAM Resource Share Created From Policy must have
+ // been promoted to a standard Resource Share using the RAM PromoteResourceShareCreatedFromPolicy
+ // (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html)
+ // API operation.
+ //
+ // For more information about sharing parameters, see Working with shared parameters
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-shared-parameters.html)
+ // in the Amazon Web Services Systems Manager User Guide.
+ Shared *bool `type:"boolean"`
}
// String returns the string representation.
@@ -27816,6 +29293,12 @@ func (s *DescribeParametersInput) SetParameterFilters(v []*ParameterStringFilter
return s
}
+// SetShared sets the Shared field's value.
+func (s *DescribeParametersInput) SetShared(v bool) *DescribeParametersInput {
+ s.Shared = &v
+ return s
+}
+
type DescribeParametersOutput struct {
_ struct{} `type:"structure"`
@@ -28827,7 +30310,7 @@ type DocumentDescription struct {
// The name of the SSM document.
Name *string `type:"string"`
- // The Amazon Web Services user account that created the document.
+ // The Amazon Web Services user that created the document.
Owner *string `type:"string"`
// A description of the parameters for a document.
@@ -29166,7 +30649,7 @@ type DocumentIdentifier struct {
// The name of the SSM document.
Name *string `type:"string"`
- // The Amazon Web Services user account that created the document.
+ // The Amazon Web Services user that created the document.
Owner *string `type:"string"`
// The operating system platform.
@@ -29192,8 +30675,8 @@ type DocumentIdentifier struct {
TargetType *string `type:"string"`
// An optional field specifying the version of the artifact associated with
- // the document. For example, "Release 12, Update 6". This value is unique across
- // all versions of a document, and can't be changed.
+ // the document. For example, 12.6. This value is unique across all versions
+ // of a document, and can't be changed.
VersionName *string `type:"string"`
}
@@ -29522,7 +31005,7 @@ func (s *DocumentMetadataResponseInfo) SetReviewerResponse(v []*DocumentReviewer
return s
}
-// Parameters specified in a System Manager document that run on the server
+// Parameters specified in a Systems Manager document that run on the server
// when the command is run.
type DocumentParameter struct {
_ struct{} `type:"structure"`
@@ -29584,10 +31067,15 @@ func (s *DocumentParameter) SetType(v string) *DocumentParameter {
return s
}
-// The document can't be shared with more Amazon Web Services user accounts.
-// You can share a document with a maximum of 20 accounts. You can publicly
-// share up to five documents. If you need to increase this limit, contact Amazon
-// Web Services Support.
+// The document can't be shared with more Amazon Web Services accounts. You
+// can specify a maximum of 20 accounts per API operation to share a private
+// document.
+//
+// By default, you can share a private document with a maximum of 1,000 accounts
+// and publicly share up to five documents.
+//
+// If you need to increase the quota for privately or publicly shared Systems
+// Manager documents, contact Amazon Web Services Support.
type DocumentPermissionLimit struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -29661,8 +31149,16 @@ type DocumentRequires struct {
// Name is a required field
Name *string `type:"string" required:"true"`
+ // The document type of the required SSM document.
+ RequireType *string `type:"string"`
+
// The document version required by the current document.
Version *string `type:"string"`
+
+ // An optional field specifying the version of the artifact associated with
+ // the document. For example, 12.6. This value is unique across all versions
+ // of a document, and can't be changed.
+ VersionName *string `type:"string"`
}
// String returns the string representation.
@@ -29702,12 +31198,24 @@ func (s *DocumentRequires) SetName(v string) *DocumentRequires {
return s
}
+// SetRequireType sets the RequireType field's value.
+func (s *DocumentRequires) SetRequireType(v string) *DocumentRequires {
+ s.RequireType = &v
+ return s
+}
+
// SetVersion sets the Version field's value.
func (s *DocumentRequires) SetVersion(v string) *DocumentRequires {
s.Version = &v
return s
}
+// SetVersionName sets the VersionName field's value.
+func (s *DocumentRequires) SetVersionName(v string) *DocumentRequires {
+ s.VersionName = &v
+ return s
+}
+
// Information about comments added to a document review request.
type DocumentReviewCommentSource struct {
_ struct{} `type:"structure"`
@@ -29943,9 +31451,8 @@ type DocumentVersionInfo struct {
// S3 bucket is correct."
StatusInformation *string `type:"string"`
- // The version of the artifact associated with the document. For example, "Release
- // 12, Update 6". This value is unique across all versions of a document, and
- // can't be changed.
+ // The version of the artifact associated with the document. For example, 12.6.
+ // This value is unique across all versions of a document, and can't be changed.
VersionName *string `type:"string"`
}
@@ -31156,8 +32663,7 @@ func (s *GetConnectionStatusInput) SetTarget(v string) *GetConnectionStatusInput
type GetConnectionStatusOutput struct {
_ struct{} `type:"structure"`
- // The status of the connection to the managed node. For example, 'Connected'
- // or 'Not Connected'.
+ // The status of the connection to the managed node.
Status *string `type:"string" enum:"ConnectionStatus"`
// The ID of the managed node to check connection status.
@@ -31419,8 +32925,8 @@ type GetDocumentInput struct {
Name *string `type:"string" required:"true"`
// An optional field specifying the version of the artifact associated with
- // the document. For example, "Release 12, Update 6". This value is unique across
- // all versions of a document and can't be changed.
+ // the document. For example, 12.6. This value is unique across all versions
+ // of a document and can't be changed.
VersionName *string `type:"string"`
}
@@ -31533,9 +33039,8 @@ type GetDocumentOutput struct {
// S3 bucket is correct."
StatusInformation *string `type:"string"`
- // The version of the artifact associated with the document. For example, "Release
- // 12, Update 6". This value is unique across all versions of a document, and
- // can't be changed.
+ // The version of the artifact associated with the document. For example, 12.6.
+ // This value is unique across all versions of a document, and can't be changed.
VersionName *string `type:"string"`
}
@@ -32347,6 +33852,10 @@ func (s *GetMaintenanceWindowExecutionTaskInvocationOutput) SetWindowTargetId(v
type GetMaintenanceWindowExecutionTaskOutput struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you applied to your maintenance window
+ // task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The time the task execution completed.
EndTime *time.Time `type:"timestamp"`
@@ -32397,6 +33906,9 @@ type GetMaintenanceWindowExecutionTaskOutput struct {
// String and GoString methods.
TaskParameters []map[string]*MaintenanceWindowTaskParameterValueExpression `type:"list" sensitive:"true"`
+ // The CloudWatch alarms that were invoked by the maintenance window task.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
+
// The type of task that was run.
Type *string `type:"string" enum:"MaintenanceWindowTaskType"`
@@ -32422,6 +33934,12 @@ func (s GetMaintenanceWindowExecutionTaskOutput) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *GetMaintenanceWindowExecutionTaskOutput) SetAlarmConfiguration(v *AlarmConfiguration) *GetMaintenanceWindowExecutionTaskOutput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetEndTime sets the EndTime field's value.
func (s *GetMaintenanceWindowExecutionTaskOutput) SetEndTime(v time.Time) *GetMaintenanceWindowExecutionTaskOutput {
s.EndTime = &v
@@ -32488,6 +34006,12 @@ func (s *GetMaintenanceWindowExecutionTaskOutput) SetTaskParameters(v []map[stri
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *GetMaintenanceWindowExecutionTaskOutput) SetTriggeredAlarms(v []*AlarmStateInformation) *GetMaintenanceWindowExecutionTaskOutput {
+ s.TriggeredAlarms = v
+ return s
+}
+
// SetType sets the Type field's value.
func (s *GetMaintenanceWindowExecutionTaskOutput) SetType(v string) *GetMaintenanceWindowExecutionTaskOutput {
s.Type = &v
@@ -32790,6 +34314,10 @@ func (s *GetMaintenanceWindowTaskInput) SetWindowTaskId(v string) *GetMaintenanc
type GetMaintenanceWindowTaskOutput struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you applied to your maintenance window
+ // task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The action to take on tasks when the maintenance window cutoff time is reached.
// CONTINUE_TASK means that tasks continue to run. For Automation, Lambda, Step
// Functions tasks, CANCEL_TASK means that currently running task invocations
@@ -32895,6 +34423,12 @@ func (s GetMaintenanceWindowTaskOutput) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *GetMaintenanceWindowTaskOutput) SetAlarmConfiguration(v *AlarmConfiguration) *GetMaintenanceWindowTaskOutput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCutoffBehavior sets the CutoffBehavior field's value.
func (s *GetMaintenanceWindowTaskOutput) SetCutoffBehavior(v string) *GetMaintenanceWindowTaskOutput {
s.CutoffBehavior = &v
@@ -32988,6 +34522,9 @@ func (s *GetMaintenanceWindowTaskOutput) SetWindowTaskId(v string) *GetMaintenan
type GetOpsItemInput struct {
_ struct{} `type:"structure"`
+ // The OpsItem Amazon Resource Name (ARN).
+ OpsItemArn *string `min:"20" type:"string"`
+
// The ID of the OpsItem that you want to get.
//
// OpsItemId is a required field
@@ -33015,6 +34552,9 @@ func (s GetOpsItemInput) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *GetOpsItemInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "GetOpsItemInput"}
+ if s.OpsItemArn != nil && len(*s.OpsItemArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("OpsItemArn", 20))
+ }
if s.OpsItemId == nil {
invalidParams.Add(request.NewErrParamRequired("OpsItemId"))
}
@@ -33025,6 +34565,12 @@ func (s *GetOpsItemInput) Validate() error {
return nil
}
+// SetOpsItemArn sets the OpsItemArn field's value.
+func (s *GetOpsItemInput) SetOpsItemArn(v string) *GetOpsItemInput {
+ s.OpsItemArn = &v
+ return s
+}
+
// SetOpsItemId sets the OpsItemId field's value.
func (s *GetOpsItemInput) SetOpsItemId(v string) *GetOpsItemInput {
s.OpsItemId = &v
@@ -33366,7 +34912,9 @@ type GetParameterHistoryInput struct {
// results.
MaxResults *int64 `min:"1" type:"integer"`
- // The name of the parameter for which you want to review history.
+ // The name or Amazon Resource Name (ARN) of the parameter for which you want
+ // to review history. For parameters shared with you from another account, you
+ // must use the full ARN.
//
// Name is a required field
Name *string `min:"1" type:"string" required:"true"`
@@ -33485,11 +35033,17 @@ func (s *GetParameterHistoryOutput) SetParameters(v []*ParameterHistory) *GetPar
type GetParameterInput struct {
_ struct{} `type:"structure"`
- // The name of the parameter you want to query.
+ // The name or Amazon Resource Name (ARN) of the parameter that you want to
+ // query. For parameters shared with you from another account, you must use
+ // the full ARN.
//
// To query by parameter label, use "Name": "name:label". To query by parameter
// version, use "Name": "name:version".
//
+ // For more information about shared parameters, see Working with shared parameters
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/sharing.html)
+ // in the Amazon Web Services Systems Manager User Guide.
+ //
// Name is a required field
Name *string `min:"1" type:"string" required:"true"`
@@ -33744,11 +35298,20 @@ func (s *GetParametersByPathOutput) SetParameters(v []*Parameter) *GetParameters
type GetParametersInput struct {
_ struct{} `type:"structure"`
- // Names of the parameters for which you want to query information.
+ // The names or Amazon Resource Names (ARNs) of the parameters that you want
+ // to query. For parameters shared with you from another account, you must use
+ // the full ARNs.
//
// To query by parameter label, use "Name": "name:label". To query by parameter
// version, use "Name": "name:version".
//
+ // The results for GetParameters requests are listed in alphabetical order in
+ // query responses.
+ //
+ // For information about shared parameters, see Working with shared parameters
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-shared-parameters.html)
+ // in the Amazon Web Services Systems Manager User Guide.
+ //
// Names is a required field
Names []*string `min:"1" type:"list" required:"true"`
@@ -34171,12 +35734,187 @@ func (s *GetPatchBaselineOutput) SetSources(v []*PatchSource) *GetPatchBaselineO
return s
}
+type GetResourcePoliciesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of items to return for this call. The call also returns
+ // a token that you can specify in a subsequent call to get the next set of
+ // results.
+ MaxResults *int64 `min:"1" type:"integer"`
+
+ // A token to start the list. Use this token to get the next set of results.
+ NextToken *string `type:"string"`
+
+ // Amazon Resource Name (ARN) of the resource to which the policies are attached.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetResourcePoliciesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetResourcePoliciesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetResourcePoliciesInput) SetMaxResults(v int64) *GetResourcePoliciesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetResourcePoliciesInput) SetNextToken(v string) *GetResourcePoliciesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *GetResourcePoliciesInput) SetResourceArn(v string) *GetResourcePoliciesInput {
+ s.ResourceArn = &v
+ return s
+}
+
+type GetResourcePoliciesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The token for the next set of items to return. Use this token to get the
+ // next set of results.
+ NextToken *string `type:"string"`
+
+ // An array of the Policy object.
+ Policies []*GetResourcePoliciesResponseEntry `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetResourcePoliciesOutput) SetNextToken(v string) *GetResourcePoliciesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetPolicies sets the Policies field's value.
+func (s *GetResourcePoliciesOutput) SetPolicies(v []*GetResourcePoliciesResponseEntry) *GetResourcePoliciesOutput {
+ s.Policies = v
+ return s
+}
+
+// A resource policy helps you to define the IAM entity (for example, an Amazon
+// Web Services account) that can manage your Systems Manager resources. Currently,
+// OpsItemGroup is the only resource that supports Systems Manager resource
+// policies. The resource policy for OpsItemGroup enables Amazon Web Services
+// accounts to view and interact with OpsCenter operational work items (OpsItems).
+type GetResourcePoliciesResponseEntry struct {
+ _ struct{} `type:"structure"`
+
+ // A resource policy helps you to define the IAM entity (for example, an Amazon
+ // Web Services account) that can manage your Systems Manager resources. Currently,
+ // OpsItemGroup is the only resource that supports Systems Manager resource
+ // policies. The resource policy for OpsItemGroup enables Amazon Web Services
+ // accounts to view and interact with OpsCenter operational work items (OpsItems).
+ Policy *string `type:"string"`
+
+ // ID of the current policy version. The hash helps to prevent a situation where
+ // multiple users attempt to overwrite a policy. You must provide this hash
+ // when updating or deleting a policy.
+ PolicyHash *string `type:"string"`
+
+ // A policy ID.
+ PolicyId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesResponseEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetResourcePoliciesResponseEntry) GoString() string {
+ return s.String()
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetResourcePoliciesResponseEntry) SetPolicy(v string) *GetResourcePoliciesResponseEntry {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyHash sets the PolicyHash field's value.
+func (s *GetResourcePoliciesResponseEntry) SetPolicyHash(v string) *GetResourcePoliciesResponseEntry {
+ s.PolicyHash = &v
+ return s
+}
+
+// SetPolicyId sets the PolicyId field's value.
+func (s *GetResourcePoliciesResponseEntry) SetPolicyId(v string) *GetResourcePoliciesResponseEntry {
+ s.PolicyId = &v
+ return s
+}
+
// The request body of the GetServiceSetting API operation.
type GetServiceSettingInput struct {
_ struct{} `type:"structure"`
// The ID of the service setting to get. The setting ID can be one of the following.
//
+ // * /ssm/managed-instance/default-ec2-instance-management-role
+ //
// * /ssm/automation/customer-script-log-destination
//
// * /ssm/automation/customer-script-log-group-name
@@ -34274,8 +36012,8 @@ type HierarchyLevelLimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
- // A hierarchy can have a maximum of 15 levels. For more information, see Requirements
- // and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html)
+ // A hierarchy can have a maximum of 15 levels. For more information, see About
+ // requirements and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-su-create.html#sysman-parameter-name-constraints)
// in the Amazon Web Services Systems Manager User Guide.
Message_ *string `locationName:"message" type:"string"`
}
@@ -34543,7 +36281,7 @@ type InstanceAggregatedAssociationOverview struct {
// Detailed status information about the aggregated associations.
DetailedStatus *string `type:"string"`
- // The number of associations for the managed node(s).
+ // The number of associations for the managed nodes.
InstanceAssociationStatusAggregatedCount map[string]*int64 `type:"map"`
}
@@ -34587,7 +36325,7 @@ type InstanceAssociation struct {
// Version information for the association on the managed node.
AssociationVersion *string `type:"string"`
- // The content of the association document for the managed node(s).
+ // The content of the association document for the managed nodes.
Content *string `min:"1" type:"string"`
// The managed node ID.
@@ -34639,7 +36377,7 @@ func (s *InstanceAssociation) SetInstanceId(v string) *InstanceAssociation {
// An S3 bucket where you want to store the results of this request.
//
// For the minimal permissions required to enable Amazon S3 output for an association,
-// see Creating associations (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-state-assoc.html)
+// see Create an association (console) (https://docs.aws.amazon.com/systems-manager/latest/userguide/state-manager-associations-creating.html#state-manager-associations-console)
// in the Systems Manager User Guide.
type InstanceAssociationOutputLocation struct {
_ struct{} `type:"structure"`
@@ -34871,14 +36609,18 @@ type InstanceInformation struct {
ComputerName *string `min:"1" type:"string"`
// The IP address of the managed node.
- IPAddress *string `min:"1" type:"string"`
+ //
+ // IPAddress is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by InstanceInformation's
+ // String and GoString methods.
+ IPAddress *string `min:"1" type:"string" sensitive:"true"`
// The Identity and Access Management (IAM) role assigned to the on-premises
// Systems Manager managed node. This call doesn't return the IAM role for Amazon
// Elastic Compute Cloud (Amazon EC2) instances. To retrieve the IAM role for
// an EC2 instance, use the Amazon EC2 DescribeInstances operation. For information,
// see DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
- // in the Amazon EC2 API Reference or describe-instances (https://docs.aws.amazon.com/cli/latest/ec2/describe-instances.html)
+ // in the Amazon EC2 API Reference or describe-instances (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html)
// in the Amazon Web Services CLI Command Reference.
IamRole *string `type:"string"`
@@ -34905,11 +36647,11 @@ type InstanceInformation struct {
// specified as the DefaultInstanceName property using the CreateActivation
// command. It is applied to the managed node by specifying the Activation Code
// and Activation ID when you install SSM Agent on the node, as explained in
- // Install SSM Agent for a hybrid environment (Linux) (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html)
- // and Install SSM Agent for a hybrid environment (Windows) (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html).
+ // Install SSM Agent for a hybrid and multicloud environment (Linux) (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html)
+ // and Install SSM Agent for a hybrid and multicloud environment (Windows) (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html).
// To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances
// operation. For information, see DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
- // in the Amazon EC2 API Reference or describe-instances (https://docs.aws.amazon.com/cli/latest/ec2/describe-instances.html)
+ // in the Amazon EC2 API Reference or describe-instances (https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html)
// in the Amazon Web Services CLI Command Reference.
Name *string `type:"string"`
@@ -35160,14 +36902,26 @@ func (s *InstanceInformationFilter) SetValueSet(v []*string) *InstanceInformatio
type InstanceInformationStringFilter struct {
_ struct{} `type:"structure"`
- // The filter key name to describe your managed nodes. For example:
+ // The filter key name to describe your managed nodes.
+ //
+ // Valid filter key values: ActivationIds | AgentVersion | AssociationStatus
+ // | IamRole | InstanceIds | PingStatus | PlatformTypes | ResourceType | SourceIds
+ // | SourceTypes | "tag-key" | "tag:{keyname}
+ //
+ // * Valid values for the AssociationStatus filter key: Success | Pending
+ // | Failed
+ //
+ // * Valid values for the PingStatus filter key: Online | ConnectionLost
+ // | Inactive (deprecated)
+ //
+ // * Valid values for the PlatformType filter key: Windows | Linux | MacOS
+ //
+ // * Valid values for the ResourceType filter key: EC2Instance | ManagedInstance
//
- // "InstanceIds" | "AgentVersion" | "PingStatus" | "PlatformTypes" | "ActivationIds"
- // | "IamRole" | "ResourceType" | "AssociationStatus" | "tag-key" | "tag:{keyname}
+ // * Valid values for the SourceType filter key: AWS::EC2::Instance | AWS::SSM::ManagedInstance
+ // | AWS::IoT::Thing
//
- // Tag Key isn't a valid filter. You must specify either tag-key or tag:{keyname}
- // and a string. Here are some valid examples: tag-key, tag:123, tag:al!, tag:Windows.
- // Here are some invalid examples: tag-keys, Tag Key, tag:, tagKey, abc:keyname.
+ // * Valid tag examples: Key=tag-key,Values=Purpose | Key=tag:Purpose,Values=Test.
//
// Key is a required field
Key *string `min:"1" type:"string" required:"true"`
@@ -35242,11 +36996,10 @@ type InstancePatchState struct {
// BaselineId is a required field
BaselineId *string `min:"20" type:"string" required:"true"`
- // The number of managed nodes where patches that are specified as Critical
- // for compliance reporting in the patch baseline aren't installed. These patches
- // might be missing, have failed installation, were rejected, or were installed
- // but awaiting a required managed node reboot. The status of these managed
- // nodes is NON_COMPLIANT.
+ // The number of patches per node that are specified as Critical for compliance
+ // reporting in the patch baseline aren't installed. These patches might be
+ // missing, have failed installation, were rejected, or were installed but awaiting
+ // a required managed node reboot. The status of these managed nodes is NON_COMPLIANT.
CriticalNonCompliantCount *int64 `type:"integer"`
// The number of patches from the patch baseline that were attempted to be installed
@@ -35260,8 +37013,8 @@ type InstancePatchState struct {
// baseline.
//
// For more information about the InstallOverrideList parameter, see About the
- // AWS-RunPatchBaseline (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html)
- // SSM document in the Amazon Web Services Systems Manager User Guide.
+ // AWS-RunPatchBaseline SSM document (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html)
+ // in the Amazon Web Services Systems Manager User Guide.
InstallOverrideList *string `min:"1" type:"string"`
// The number of patches from the patch baseline that are installed on the managed
@@ -35323,9 +37076,9 @@ type InstancePatchState struct {
// OperationStartTime is a required field
OperationStartTime *time.Time `type:"timestamp" required:"true"`
- // The number of managed nodes with patches installed that are specified as
- // other than Critical or Security but aren't compliant with the patch baseline.
- // The status of these managed nodes is NON_COMPLIANT.
+ // The number of patches per node that are specified as other than Critical
+ // or Security but aren't compliant with the patch baseline. The status of these
+ // managed nodes is NON_COMPLIANT.
OtherNonCompliantCount *int64 `type:"integer"`
// Placeholder information. This field will always be empty in the current release
@@ -35356,10 +37109,10 @@ type InstancePatchState struct {
// until a reboot is performed.
RebootOption *string `type:"string" enum:"RebootOption"`
- // The number of managed nodes where patches that are specified as Security
- // in a patch advisory aren't installed. These patches might be missing, have
- // failed installation, were rejected, or were installed but awaiting a required
- // managed node reboot. The status of these managed nodes is NON_COMPLIANT.
+ // The number of patches per node that are specified as Security in a patch
+ // advisory aren't installed. These patches might be missing, have failed installation,
+ // were rejected, or were installed but awaiting a required managed node reboot.
+ // The status of these managed nodes is NON_COMPLIANT.
SecurityNonCompliantCount *int64 `type:"integer"`
// The ID of the patch baseline snapshot used during the patching operation
@@ -35629,6 +37382,414 @@ func (s *InstancePatchStateFilter) SetValues(v []*string) *InstancePatchStateFil
return s
}
+// An object containing various properties of a managed node.
+type InstanceProperty struct {
+ _ struct{} `type:"structure"`
+
+ // The activation ID created by Systems Manager when the server or virtual machine
+ // (VM) was registered
+ ActivationId *string `type:"string"`
+
+ // The version of SSM Agent running on your managed node.
+ AgentVersion *string `type:"string"`
+
+ // The CPU architecture of the node. For example, x86_64.
+ Architecture *string `type:"string"`
+
+ // Status information about the aggregated associations.
+ AssociationOverview *InstanceAggregatedAssociationOverview `type:"structure"`
+
+ // The status of the State Manager association applied to the managed node.
+ AssociationStatus *string `type:"string"`
+
+ // The fully qualified host name of the managed node.
+ ComputerName *string `min:"1" type:"string"`
+
+ // The public IPv4 address assigned to the node. If a public IPv4 address isn't
+ // assigned to the node, this value is blank.
+ //
+ // IPAddress is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by InstanceProperty's
+ // String and GoString methods.
+ IPAddress *string `min:"1" type:"string" sensitive:"true"`
+
+ // The IAM role used in the hybrid activation to register the node with Systems
+ // Manager.
+ IamRole *string `type:"string"`
+
+ // The ID of the managed node.
+ InstanceId *string `type:"string"`
+
+ // The instance profile attached to the node. If an instance profile isn't attached
+ // to the node, this value is blank.
+ InstanceRole *string `type:"string"`
+
+ // The current state of the node.
+ InstanceState *string `type:"string"`
+
+ // The instance type of the managed node. For example, t3.large.
+ InstanceType *string `type:"string"`
+
+ // The name of the key pair associated with the node. If a key pair isnt't associated
+ // with the node, this value is blank.
+ KeyName *string `type:"string"`
+
+ // The date the association was last run.
+ LastAssociationExecutionDate *time.Time `type:"timestamp"`
+
+ // The date and time when the SSM Agent last pinged the Systems Manager service.
+ LastPingDateTime *time.Time `type:"timestamp"`
+
+ // The last date the association was successfully run.
+ LastSuccessfulAssociationExecutionDate *time.Time `type:"timestamp"`
+
+ // The timestamp for when the node was launched.
+ LaunchTime *time.Time `type:"timestamp"`
+
+ // The value of the EC2 Name tag associated with the node. If a Name tag hasn't
+ // been applied to the node, this value is blank.
+ Name *string `type:"string"`
+
+ // Connection status of the SSM Agent on the managed node.
+ PingStatus *string `type:"string" enum:"PingStatus"`
+
+ // The name of the operating system platform running on your managed node.
+ PlatformName *string `type:"string"`
+
+ // The operating system platform type of the managed node. For example, Windows.
+ PlatformType *string `type:"string" enum:"PlatformType"`
+
+ // The version of the OS platform running on your managed node.
+ PlatformVersion *string `type:"string"`
+
+ // The date the node was registered with Systems Manager.
+ RegistrationDate *time.Time `type:"timestamp"`
+
+ // The type of managed node.
+ ResourceType *string `type:"string"`
+
+ // The ID of the source resource.
+ SourceId *string `type:"string"`
+
+ // The type of the source resource.
+ SourceType *string `type:"string" enum:"SourceType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstanceProperty) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstanceProperty) GoString() string {
+ return s.String()
+}
+
+// SetActivationId sets the ActivationId field's value.
+func (s *InstanceProperty) SetActivationId(v string) *InstanceProperty {
+ s.ActivationId = &v
+ return s
+}
+
+// SetAgentVersion sets the AgentVersion field's value.
+func (s *InstanceProperty) SetAgentVersion(v string) *InstanceProperty {
+ s.AgentVersion = &v
+ return s
+}
+
+// SetArchitecture sets the Architecture field's value.
+func (s *InstanceProperty) SetArchitecture(v string) *InstanceProperty {
+ s.Architecture = &v
+ return s
+}
+
+// SetAssociationOverview sets the AssociationOverview field's value.
+func (s *InstanceProperty) SetAssociationOverview(v *InstanceAggregatedAssociationOverview) *InstanceProperty {
+ s.AssociationOverview = v
+ return s
+}
+
+// SetAssociationStatus sets the AssociationStatus field's value.
+func (s *InstanceProperty) SetAssociationStatus(v string) *InstanceProperty {
+ s.AssociationStatus = &v
+ return s
+}
+
+// SetComputerName sets the ComputerName field's value.
+func (s *InstanceProperty) SetComputerName(v string) *InstanceProperty {
+ s.ComputerName = &v
+ return s
+}
+
+// SetIPAddress sets the IPAddress field's value.
+func (s *InstanceProperty) SetIPAddress(v string) *InstanceProperty {
+ s.IPAddress = &v
+ return s
+}
+
+// SetIamRole sets the IamRole field's value.
+func (s *InstanceProperty) SetIamRole(v string) *InstanceProperty {
+ s.IamRole = &v
+ return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *InstanceProperty) SetInstanceId(v string) *InstanceProperty {
+ s.InstanceId = &v
+ return s
+}
+
+// SetInstanceRole sets the InstanceRole field's value.
+func (s *InstanceProperty) SetInstanceRole(v string) *InstanceProperty {
+ s.InstanceRole = &v
+ return s
+}
+
+// SetInstanceState sets the InstanceState field's value.
+func (s *InstanceProperty) SetInstanceState(v string) *InstanceProperty {
+ s.InstanceState = &v
+ return s
+}
+
+// SetInstanceType sets the InstanceType field's value.
+func (s *InstanceProperty) SetInstanceType(v string) *InstanceProperty {
+ s.InstanceType = &v
+ return s
+}
+
+// SetKeyName sets the KeyName field's value.
+func (s *InstanceProperty) SetKeyName(v string) *InstanceProperty {
+ s.KeyName = &v
+ return s
+}
+
+// SetLastAssociationExecutionDate sets the LastAssociationExecutionDate field's value.
+func (s *InstanceProperty) SetLastAssociationExecutionDate(v time.Time) *InstanceProperty {
+ s.LastAssociationExecutionDate = &v
+ return s
+}
+
+// SetLastPingDateTime sets the LastPingDateTime field's value.
+func (s *InstanceProperty) SetLastPingDateTime(v time.Time) *InstanceProperty {
+ s.LastPingDateTime = &v
+ return s
+}
+
+// SetLastSuccessfulAssociationExecutionDate sets the LastSuccessfulAssociationExecutionDate field's value.
+func (s *InstanceProperty) SetLastSuccessfulAssociationExecutionDate(v time.Time) *InstanceProperty {
+ s.LastSuccessfulAssociationExecutionDate = &v
+ return s
+}
+
+// SetLaunchTime sets the LaunchTime field's value.
+func (s *InstanceProperty) SetLaunchTime(v time.Time) *InstanceProperty {
+ s.LaunchTime = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *InstanceProperty) SetName(v string) *InstanceProperty {
+ s.Name = &v
+ return s
+}
+
+// SetPingStatus sets the PingStatus field's value.
+func (s *InstanceProperty) SetPingStatus(v string) *InstanceProperty {
+ s.PingStatus = &v
+ return s
+}
+
+// SetPlatformName sets the PlatformName field's value.
+func (s *InstanceProperty) SetPlatformName(v string) *InstanceProperty {
+ s.PlatformName = &v
+ return s
+}
+
+// SetPlatformType sets the PlatformType field's value.
+func (s *InstanceProperty) SetPlatformType(v string) *InstanceProperty {
+ s.PlatformType = &v
+ return s
+}
+
+// SetPlatformVersion sets the PlatformVersion field's value.
+func (s *InstanceProperty) SetPlatformVersion(v string) *InstanceProperty {
+ s.PlatformVersion = &v
+ return s
+}
+
+// SetRegistrationDate sets the RegistrationDate field's value.
+func (s *InstanceProperty) SetRegistrationDate(v time.Time) *InstanceProperty {
+ s.RegistrationDate = &v
+ return s
+}
+
+// SetResourceType sets the ResourceType field's value.
+func (s *InstanceProperty) SetResourceType(v string) *InstanceProperty {
+ s.ResourceType = &v
+ return s
+}
+
+// SetSourceId sets the SourceId field's value.
+func (s *InstanceProperty) SetSourceId(v string) *InstanceProperty {
+ s.SourceId = &v
+ return s
+}
+
+// SetSourceType sets the SourceType field's value.
+func (s *InstanceProperty) SetSourceType(v string) *InstanceProperty {
+ s.SourceType = &v
+ return s
+}
+
+// Describes a filter for a specific list of managed nodes. You can filter node
+// information by using tags. You specify tags by using a key-value mapping.
+type InstancePropertyFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the filter.
+ //
+ // Key is a required field
+ Key *string `locationName:"key" type:"string" required:"true" enum:"InstancePropertyFilterKey"`
+
+ // The filter values.
+ //
+ // ValueSet is a required field
+ ValueSet []*string `locationName:"valueSet" min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstancePropertyFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstancePropertyFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstancePropertyFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstancePropertyFilter"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.ValueSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("ValueSet"))
+ }
+ if s.ValueSet != nil && len(s.ValueSet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ValueSet", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *InstancePropertyFilter) SetKey(v string) *InstancePropertyFilter {
+ s.Key = &v
+ return s
+}
+
+// SetValueSet sets the ValueSet field's value.
+func (s *InstancePropertyFilter) SetValueSet(v []*string) *InstancePropertyFilter {
+ s.ValueSet = v
+ return s
+}
+
+// The filters to describe or get information about your managed nodes.
+type InstancePropertyStringFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The filter key name to describe your managed nodes.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // The operator used by the filter call.
+ Operator *string `type:"string" enum:"InstancePropertyFilterOperator"`
+
+ // The filter key name to describe your managed nodes.
+ //
+ // Values is a required field
+ Values []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstancePropertyStringFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InstancePropertyStringFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InstancePropertyStringFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InstancePropertyStringFilter"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Values == nil {
+ invalidParams.Add(request.NewErrParamRequired("Values"))
+ }
+ if s.Values != nil && len(s.Values) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Values", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *InstancePropertyStringFilter) SetKey(v string) *InstancePropertyStringFilter {
+ s.Key = &v
+ return s
+}
+
+// SetOperator sets the Operator field's value.
+func (s *InstancePropertyStringFilter) SetOperator(v string) *InstancePropertyStringFilter {
+ s.Operator = &v
+ return s
+}
+
+// SetValues sets the Values field's value.
+func (s *InstancePropertyStringFilter) SetValues(v []*string) *InstancePropertyStringFilter {
+ s.Values = v
+ return s
+}
+
// An error occurred on the server side.
type InternalServerError struct {
_ struct{} `type:"structure"`
@@ -37124,14 +39285,15 @@ func (s *InvalidFilterValue) RequestID() string {
//
// - You don't have permission to access the managed node.
//
-// - Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+// - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
-// - The managed node isn't in valid state. Valid states are: Running, Pending,
-// Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+// - The managed node isn't in a valid state. Valid states are: Running,
+// Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+// Terminated.
type InvalidInstanceId struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -37259,6 +39421,70 @@ func (s *InvalidInstanceInformationFilterValue) RequestID() string {
return s.RespMetadata.RequestID
}
+// The specified filter value isn't valid.
+type InvalidInstancePropertyFilterValue struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidInstancePropertyFilterValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidInstancePropertyFilterValue) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidInstancePropertyFilterValue(v protocol.ResponseMetadata) error {
+ return &InvalidInstancePropertyFilterValue{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidInstancePropertyFilterValue) Code() string {
+ return "InvalidInstancePropertyFilterValue"
+}
+
+// Message returns the exception's message.
+func (s *InvalidInstancePropertyFilterValue) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidInstancePropertyFilterValue) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidInstancePropertyFilterValue) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidInstancePropertyFilterValue) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidInstancePropertyFilterValue) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// The specified inventory group isn't valid.
type InvalidInventoryGroupException struct {
_ struct{} `type:"structure"`
@@ -38425,7 +40651,8 @@ func (s *InvalidResultAttributeException) RequestID() string {
// The role name can't contain invalid characters. Also verify that you specified
// an IAM role for notifications that includes the required trust policy. For
// information about configuring the IAM role for Run Command notifications,
-// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html)
+// see Monitoring Systems Manager status changes using Amazon SNS notifications
+// (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html)
// in the Amazon Web Services Systems Manager User Guide.
type InvalidRole struct {
_ struct{} `type:"structure"`
@@ -38554,7 +40781,7 @@ func (s *InvalidSchedule) RequestID() string {
return s.RespMetadata.RequestID
}
-// The specified tag key or value is not valid.
+// The specified tag key or value isn't valid.
type InvalidTag struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -39875,6 +42102,9 @@ type LabelParameterVersionInput struct {
// The parameter name on which you want to attach one or more labels.
//
+ // You can't enter the Amazon Resource Name (ARN) for a parameter, only the
+ // parameter name itself.
+ //
// Name is a required field
Name *string `min:"1" type:"string" required:"true"`
@@ -39946,7 +42176,7 @@ type LabelParameterVersionOutput struct {
_ struct{} `type:"structure"`
// The label doesn't meet the requirements. For information about parameter
- // label requirements, see Labeling parameters (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html)
+ // label requirements, see Working with parameter labels (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html)
// in the Amazon Web Services Systems Manager User Guide.
InvalidLabels []*string `min:"1" type:"list"`
@@ -41308,10 +43538,10 @@ func (s *ListInventoryEntriesInput) SetTypeName(v string) *ListInventoryEntriesI
type ListInventoryEntriesOutput struct {
_ struct{} `type:"structure"`
- // The time that inventory information was collected for the managed node(s).
+ // The time that inventory information was collected for the managed nodes.
CaptureTime *string `type:"string"`
- // A list of inventory items on the managed node(s).
+ // A list of inventory items on the managed nodes.
Entries []map[string]*string `type:"list"`
// The managed node ID targeted by the request to query inventory information.
@@ -41321,7 +43551,7 @@ type ListInventoryEntriesOutput struct {
// items to return, the string is empty.
NextToken *string `type:"string"`
- // The inventory schema version used by the managed node(s).
+ // The inventory schema version used by the managed nodes.
SchemaVersion *string `type:"string"`
// The type of inventory item returned by the request.
@@ -42070,7 +44300,7 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut
type LoggingInfo struct {
_ struct{} `type:"structure"`
- // The name of an S3 bucket where execution logs are stored .
+ // The name of an S3 bucket where execution logs are stored.
//
// S3BucketName is a required field
S3BucketName *string `min:"3" type:"string" required:"true"`
@@ -42295,6 +44525,9 @@ func (s *MaintenanceWindowExecution) SetWindowId(v string) *MaintenanceWindowExe
type MaintenanceWindowExecutionTaskIdentity struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm applied to your maintenance window task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The time the task execution finished.
EndTime *time.Time `type:"timestamp"`
@@ -42317,6 +44550,9 @@ type MaintenanceWindowExecutionTaskIdentity struct {
// The type of task that ran.
TaskType *string `type:"string" enum:"MaintenanceWindowTaskType"`
+ // The CloudWatch alarm that was invoked by the maintenance window task.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
+
// The ID of the maintenance window execution that ran the task.
WindowExecutionId *string `min:"36" type:"string"`
}
@@ -42339,6 +44575,12 @@ func (s MaintenanceWindowExecutionTaskIdentity) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *MaintenanceWindowExecutionTaskIdentity) SetAlarmConfiguration(v *AlarmConfiguration) *MaintenanceWindowExecutionTaskIdentity {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetEndTime sets the EndTime field's value.
func (s *MaintenanceWindowExecutionTaskIdentity) SetEndTime(v time.Time) *MaintenanceWindowExecutionTaskIdentity {
s.EndTime = &v
@@ -42381,6 +44623,12 @@ func (s *MaintenanceWindowExecutionTaskIdentity) SetTaskType(v string) *Maintena
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *MaintenanceWindowExecutionTaskIdentity) SetTriggeredAlarms(v []*AlarmStateInformation) *MaintenanceWindowExecutionTaskIdentity {
+ s.TriggeredAlarms = v
+ return s
+}
+
// SetWindowExecutionId sets the WindowExecutionId field's value.
func (s *MaintenanceWindowExecutionTaskIdentity) SetWindowExecutionId(v string) *MaintenanceWindowExecutionTaskIdentity {
s.WindowExecutionId = &v
@@ -43239,6 +45487,9 @@ func (s *MaintenanceWindowTarget) SetWindowTargetId(v string) *MaintenanceWindow
type MaintenanceWindowTask struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm applied to your maintenance window task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The specification for whether tasks should continue to run after the cutoff
// time specified in the maintenance windows is reached.
CutoffBehavior *string `type:"string" enum:"MaintenanceWindowTaskCutoffBehavior"`
@@ -43345,6 +45596,12 @@ func (s MaintenanceWindowTask) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *MaintenanceWindowTask) SetAlarmConfiguration(v *AlarmConfiguration) *MaintenanceWindowTask {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCutoffBehavior sets the CutoffBehavior field's value.
func (s *MaintenanceWindowTask) SetCutoffBehavior(v string) *MaintenanceWindowTask {
s.CutoffBehavior = &v
@@ -43555,6 +45812,71 @@ func (s *MaintenanceWindowTaskParameterValueExpression) SetValues(v []*string) *
return s
}
+// The specified policy document is malformed or invalid, or excessive PutResourcePolicy
+// or DeleteResourcePolicy calls have been made.
+type MalformedResourcePolicyDocumentException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MalformedResourcePolicyDocumentException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s MalformedResourcePolicyDocumentException) GoString() string {
+ return s.String()
+}
+
+func newErrorMalformedResourcePolicyDocumentException(v protocol.ResponseMetadata) error {
+ return &MalformedResourcePolicyDocumentException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *MalformedResourcePolicyDocumentException) Code() string {
+ return "MalformedResourcePolicyDocumentException"
+}
+
+// Message returns the exception's message.
+func (s *MalformedResourcePolicyDocumentException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *MalformedResourcePolicyDocumentException) OrigErr() error {
+ return nil
+}
+
+func (s *MalformedResourcePolicyDocumentException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *MalformedResourcePolicyDocumentException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *MalformedResourcePolicyDocumentException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// The size limit of a document is 64 KB.
type MaxDocumentSizeExceeded struct {
_ struct{} `type:"structure"`
@@ -43667,15 +45989,14 @@ func (s *MetadataValue) SetValue(v string) *MetadataValue {
type ModifyDocumentPermissionInput struct {
_ struct{} `type:"structure"`
- // The Amazon Web Services user accounts that should have access to the document.
- // The account IDs can either be a group of account IDs or All.
+ // The Amazon Web Services users that should have access to the document. The
+ // account IDs can either be a group of account IDs or All.
AccountIdsToAdd []*string `type:"list"`
- // The Amazon Web Services user accounts that should no longer have access to
- // the document. The Amazon Web Services user account can either be a group
- // of account IDs or All. This action has a higher priority than AccountIdsToAdd.
- // If you specify an account ID to add and the same ID to remove, the system
- // removes access to the document.
+ // The Amazon Web Services users that should no longer have access to the document.
+ // The Amazon Web Services user can either be a group of account IDs or All.
+ // This action has a higher priority than AccountIdsToAdd. If you specify an
+ // ID to add and the same ID to remove, the system removes access to the document.
AccountIdsToRemove []*string `type:"list"`
// The name of the document that you want to share.
@@ -44179,7 +46500,7 @@ func (s *OpsFilter) SetValues(v []*string) *OpsFilter {
// timeline graph. For the Amazon Web Services resource, OpsCenter aggregates
// information from Config, CloudTrail logs, and EventBridge, so you don't have
// to navigate across multiple console pages during your investigation. For
-// more information, see OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
+// more information, see Amazon Web Services Systems Manager OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html)
// in the Amazon Web Services Systems Manager User Guide.
type OpsItem struct {
_ struct{} `type:"structure"`
@@ -44234,15 +46555,26 @@ type OpsItem struct {
// Use the /aws/resources key in OperationalData to specify a related resource
// in the request. Use the /aws/automations key in OperationalData to associate
// an Automation runbook with the OpsItem. To view Amazon Web Services CLI example
- // commands that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems)
+ // commands that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-manually-create-OpsItems.html)
// in the Amazon Web Services Systems Manager User Guide.
OperationalData map[string]*OpsItemDataValue `type:"map"`
+ // The OpsItem Amazon Resource Name (ARN).
+ OpsItemArn *string `min:"20" type:"string"`
+
// The ID of the OpsItem.
OpsItemId *string `type:"string"`
- // The type of OpsItem. Currently, the only valid values are /aws/changerequest
- // and /aws/issue.
+ // The type of OpsItem. Systems Manager supports the following types of OpsItems:
+ //
+ // * /aws/issue This type of OpsItem is used for default OpsItems created
+ // by OpsCenter.
+ //
+ // * /aws/changerequest This type of OpsItem is used by Change Manager for
+ // reviewing and approving or rejecting change requests.
+ //
+ // * /aws/insight This type of OpsItem is used by OpsCenter for aggregating
+ // and reporting on duplicate OpsItems.
OpsItemType *string `type:"string"`
// The time specified in a change request for a runbook workflow to end. Currently
@@ -44360,6 +46692,12 @@ func (s *OpsItem) SetOperationalData(v map[string]*OpsItemDataValue) *OpsItem {
return s
}
+// SetOpsItemArn sets the OpsItemArn field's value.
+func (s *OpsItem) SetOpsItemArn(v string) *OpsItem {
+ s.OpsItemArn = &v
+ return s
+}
+
// SetOpsItemId sets the OpsItemId field's value.
func (s *OpsItem) SetOpsItemId(v string) *OpsItem {
s.OpsItemId = &v
@@ -44426,6 +46764,72 @@ func (s *OpsItem) SetVersion(v string) *OpsItem {
return s
}
+// You don't have permission to view OpsItems in the specified account. Verify
+// that your account is configured either as a Systems Manager delegated administrator
+// or that you are logged into the Organizations management account.
+type OpsItemAccessDeniedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OpsItemAccessDeniedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OpsItemAccessDeniedException) GoString() string {
+ return s.String()
+}
+
+func newErrorOpsItemAccessDeniedException(v protocol.ResponseMetadata) error {
+ return &OpsItemAccessDeniedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *OpsItemAccessDeniedException) Code() string {
+ return "OpsItemAccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *OpsItemAccessDeniedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *OpsItemAccessDeniedException) OrigErr() error {
+ return nil
+}
+
+func (s *OpsItemAccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *OpsItemAccessDeniedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *OpsItemAccessDeniedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// The OpsItem already exists.
type OpsItemAlreadyExistsException struct {
_ struct{} `type:"structure"`
@@ -44492,6 +46896,70 @@ func (s *OpsItemAlreadyExistsException) RequestID() string {
return s.RespMetadata.RequestID
}
+// The specified OpsItem is in the process of being deleted.
+type OpsItemConflictException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OpsItemConflictException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s OpsItemConflictException) GoString() string {
+ return s.String()
+}
+
+func newErrorOpsItemConflictException(v protocol.ResponseMetadata) error {
+ return &OpsItemConflictException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *OpsItemConflictException) Code() string {
+ return "OpsItemConflictException"
+}
+
+// Message returns the exception's message.
+func (s *OpsItemConflictException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *OpsItemConflictException) OrigErr() error {
+ return nil
+}
+
+func (s *OpsItemConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *OpsItemConflictException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *OpsItemConflictException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// An object that defines the value of the key and its type in the OperationalData
// map.
type OpsItemDataValue struct {
@@ -44874,8 +47342,7 @@ func (s *OpsItemInvalidParameterException) RequestID() string {
return s.RespMetadata.RequestID
}
-// The request caused OpsItems to exceed one or more quotas. For information
-// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+// The request caused OpsItems to exceed one or more quotas.
type OpsItemLimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -45389,8 +47856,16 @@ type OpsItemSummary struct {
// The ID of the OpsItem.
OpsItemId *string `type:"string"`
- // The type of OpsItem. Currently, the only valid values are /aws/changerequest
- // and /aws/issue.
+ // The type of OpsItem. Systems Manager supports the following types of OpsItems:
+ //
+ // * /aws/issue This type of OpsItem is used for default OpsItems created
+ // by OpsCenter.
+ //
+ // * /aws/changerequest This type of OpsItem is used by Change Manager for
+ // reviewing and approving or rejecting change requests.
+ //
+ // * /aws/insight This type of OpsItem is used by OpsCenter for aggregating
+ // and reporting on duplicate OpsItems.
OpsItemType *string `type:"string"`
// The time specified in a change request for a runbook workflow to end. Currently
@@ -46358,7 +48833,8 @@ type ParameterHistory struct {
// Information about the parameter.
Description *string `type:"string"`
- // The ID of the query key used for this parameter.
+ // The alias of the Key Management Service (KMS) key used to encrypt the parameter.
+ // Applies to SecureString parameters only
KeyId *string `min:"1" type:"string"`
// Labels assigned to the parameter version.
@@ -46695,11 +49171,15 @@ func (s *ParameterMaxVersionLimitExceeded) RequestID() string {
return s.RespMetadata.RequestID
}
-// Metadata includes information like the ARN of the last user and the date/time
-// the parameter was last used.
+// Metadata includes information like the Amazon Resource Name (ARN) of the
+// last user to update the parameter and the date and time the parameter was
+// last used.
type ParameterMetadata struct {
_ struct{} `type:"structure"`
+ // The (ARN) of the last user to update the parameter.
+ ARN *string `type:"string"`
+
// A parameter name can include only the following letters and symbols.
//
// a-zA-Z0-9_.-
@@ -46712,7 +49192,8 @@ type ParameterMetadata struct {
// Description of the parameter actions.
Description *string `type:"string"`
- // The ID of the query key used for this parameter.
+ // The alias of the Key Management Service (KMS) key used to encrypt the parameter.
+ // Applies to SecureString parameters only.
KeyId *string `min:"1" type:"string"`
// Date the parameter was last changed or updated.
@@ -46757,6 +49238,12 @@ func (s ParameterMetadata) GoString() string {
return s.String()
}
+// SetARN sets the ARN field's value.
+func (s *ParameterMetadata) SetARN(v string) *ParameterMetadata {
+ s.ARN = &v
+ return s
+}
+
// SetAllowedPattern sets the AllowedPattern field's value.
func (s *ParameterMetadata) SetAllowedPattern(v string) *ParameterMetadata {
s.AllowedPattern = &v
@@ -47239,6 +49726,74 @@ func (s *ParametersFilter) SetValues(v []*string) *ParametersFilter {
return s
}
+// A detailed status of the parent step.
+type ParentStepDetails struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the automation action.
+ Action *string `type:"string"`
+
+ // The current repetition of the loop represented by an integer.
+ Iteration *int64 `type:"integer"`
+
+ // The current value of the specified iterator in the loop.
+ IteratorValue *string `type:"string"`
+
+ // The unique ID of a step execution.
+ StepExecutionId *string `type:"string"`
+
+ // The name of the step.
+ StepName *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ParentStepDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ParentStepDetails) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *ParentStepDetails) SetAction(v string) *ParentStepDetails {
+ s.Action = &v
+ return s
+}
+
+// SetIteration sets the Iteration field's value.
+func (s *ParentStepDetails) SetIteration(v int64) *ParentStepDetails {
+ s.Iteration = &v
+ return s
+}
+
+// SetIteratorValue sets the IteratorValue field's value.
+func (s *ParentStepDetails) SetIteratorValue(v string) *ParentStepDetails {
+ s.IteratorValue = &v
+ return s
+}
+
+// SetStepExecutionId sets the StepExecutionId field's value.
+func (s *ParentStepDetails) SetStepExecutionId(v string) *ParentStepDetails {
+ s.StepExecutionId = &v
+ return s
+}
+
+// SetStepName sets the StepName field's value.
+func (s *ParentStepDetails) SetStepName(v string) *ParentStepDetails {
+ s.StepName = &v
+ return s
+}
+
// Represents metadata about a patch.
type Patch struct {
_ struct{} `type:"structure"`
@@ -47566,6 +50121,9 @@ type PatchComplianceData struct {
// The IDs of one or more Common Vulnerabilities and Exposure (CVE) issues that
// are resolved by the patch.
+ //
+ // Currently, CVE ID values are reported only for patches with a status of Missing
+ // or Failed.
CVEIds *string `type:"string"`
// The classification of the patch, such as SecurityUpdates, Updates, and CriticalUpdates.
@@ -48371,7 +50929,7 @@ type PutComplianceItemsInput struct {
// A summary of the call execution that includes an execution ID, the type of
// execution (for example, Command), and the date/time of the execution using
- // a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'.
+ // a datetime object that is saved in the following format: yyyy-MM-dd'T'HH:mm:ss'Z'
//
// ExecutionSummary is a required field
ExecutionSummary *ComplianceExecutionSummary `type:"structure" required:"true"`
@@ -48669,9 +51227,21 @@ type PutParameterInput struct {
// When you create a String parameter and specify aws:ec2:image, Amazon Web
// Services Systems Manager validates the parameter value is in the required
// format, such as ami-12345abcdeEXAMPLE, and that the specified AMI is available
- // in your Amazon Web Services account. For more information, see Native parameter
- // support for Amazon Machine Image (AMI) IDs (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html)
- // in the Amazon Web Services Systems Manager User Guide.
+ // in your Amazon Web Services account.
+ //
+ // If the action is successful, the service sends back an HTTP 200 response
+ // which indicates a successful PutParameter call for all cases except for data
+ // type aws:ec2:image. If you call PutParameter with aws:ec2:image data type,
+ // a successful HTTP 200 response does not guarantee that your parameter was
+ // successfully created or updated. The aws:ec2:image value is validated asynchronously,
+ // and the PutParameter call returns before the validation is complete. If you
+ // submit an invalid AMI value, the PutParameter operation will return success,
+ // but the asynchronous validation will fail and the parameter will not be created
+ // or updated. To monitor whether your aws:ec2:image parameters are created
+ // successfully, see Setting up notifications or trigger actions based on Parameter
+ // Store events (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-cwe.html).
+ // For more information about AMI format validation , see Native parameter support
+ // for Amazon Machine Image IDs (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html).
DataType *string `type:"string"`
// Information about the parameter that you want to add to the system. Optional
@@ -48681,22 +51251,22 @@ type PutParameterInput struct {
Description *string `type:"string"`
// The Key Management Service (KMS) ID that you want to use to encrypt a parameter.
- // Either the default KMS key automatically assigned to your Amazon Web Services
- // account or a custom key. Required for parameters that use the SecureString
- // data type.
+ // Use a custom key for better security. Required for parameters that use the
+ // SecureString data type.
//
// If you don't specify a key ID, the system uses the default key associated
- // with your Amazon Web Services account.
- //
- // * To use your default KMS key, choose the SecureString data type, and
- // do not specify the Key ID when you create the parameter. The system automatically
- // populates Key ID with your default KMS key.
+ // with your Amazon Web Services account which is not as secure as using a custom
+ // key.
//
// * To use a custom KMS key, choose the SecureString data type with the
// Key ID parameter.
KeyId *string `min:"1" type:"string"`
// The fully qualified name of the parameter that you want to add to the system.
+ //
+ // You can't enter the Amazon Resource Name (ARN) for a parameter, only the
+ // parameter name itself.
+ //
// The fully qualified name includes the complete hierarchy of the parameter
// path and name. For parameters in a hierarchy, you must include a leading
// forward slash character (/) when you create or reference a parameter. For
@@ -48786,8 +51356,7 @@ type PutParameterInput struct {
// Advanced parameters have a content size limit of 8 KB and can be configured
// to use parameter policies. You can create a maximum of 100,000 advanced parameters
// for each Region in an Amazon Web Services account. Advanced parameters incur
- // a charge. For more information, see Standard and advanced parameter tiers
- // (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html)
+ // a charge. For more information, see Managing parameter tiers (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// You can change a standard parameter to an advanced parameter any time. But
@@ -48835,7 +51404,7 @@ type PutParameterInput struct {
// account in the current Amazon Web Services Region.
//
// For more information about configuring the default tier option, see Specifying
- // a default parameter tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html)
+ // a default parameter tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html#ps-default-tier)
// in the Amazon Web Services Systems Manager User Guide.
Tier *string `type:"string" enum:"ParameterTier"`
@@ -49029,6 +51598,130 @@ func (s *PutParameterOutput) SetVersion(v int64) *PutParameterOutput {
return s
}
+type PutResourcePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // A policy you want to associate with a resource.
+ //
+ // Policy is a required field
+ Policy *string `type:"string" required:"true"`
+
+ // ID of the current policy version. The hash helps to prevent a situation where
+ // multiple users attempt to overwrite a policy. You must provide this hash
+ // when updating or deleting a policy.
+ PolicyHash *string `type:"string"`
+
+ // The policy ID.
+ PolicyId *string `type:"string"`
+
+ // Amazon Resource Name (ARN) of the resource to which you want to attach a
+ // policy.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutResourcePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutResourcePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutResourcePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"}
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyHash sets the PolicyHash field's value.
+func (s *PutResourcePolicyInput) SetPolicyHash(v string) *PutResourcePolicyInput {
+ s.PolicyHash = &v
+ return s
+}
+
+// SetPolicyId sets the PolicyId field's value.
+func (s *PutResourcePolicyInput) SetPolicyId(v string) *PutResourcePolicyInput {
+ s.PolicyId = &v
+ return s
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput {
+ s.ResourceArn = &v
+ return s
+}
+
+type PutResourcePolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // ID of the current policy version.
+ PolicyHash *string `type:"string"`
+
+ // The policy ID. To update a policy, you must specify PolicyId and PolicyHash.
+ PolicyId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutResourcePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutResourcePolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyHash sets the PolicyHash field's value.
+func (s *PutResourcePolicyOutput) SetPolicyHash(v string) *PutResourcePolicyOutput {
+ s.PolicyHash = &v
+ return s
+}
+
+// SetPolicyId sets the PolicyId field's value.
+func (s *PutResourcePolicyOutput) SetPolicyId(v string) *PutResourcePolicyOutput {
+ s.PolicyId = &v
+ return s
+}
+
type RegisterDefaultPatchBaselineInput struct {
_ struct{} `type:"structure"`
@@ -49432,6 +52125,9 @@ func (s *RegisterTargetWithMaintenanceWindowOutput) SetWindowTargetId(v string)
type RegisterTaskWithMaintenanceWindowInput struct {
_ struct{} `type:"structure"`
+ // The CloudWatch alarm you want to apply to your maintenance window task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// User-provided idempotency token.
ClientToken *string `min:"1" type:"string" idempotencyToken:"true"`
@@ -49499,17 +52195,16 @@ type RegisterTaskWithMaintenanceWindowInput struct {
// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services
// Systems Manager to assume when running a maintenance window task. If you
- // do not specify a service role ARN, Systems Manager uses your account's service-linked
- // role. If no service-linked role for Systems Manager exists in your account,
- // it is created when you run RegisterTaskWithMaintenanceWindow.
- //
- // For more information, see the following topics in the in the Amazon Web Services
- // Systems Manager User Guide:
- //
- // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions)
- //
- // * Should I use a service-linked role or a custom service role to run maintenance
- // window tasks? (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role)
+ // do not specify a service role ARN, Systems Manager uses a service-linked
+ // role in your account. If no appropriate service-linked role for Systems Manager
+ // exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.
+ //
+ // However, for an improved security posture, we strongly recommend creating
+ // a custom policy and custom service role for running your maintenance window
+ // tasks. The policy can be crafted to provide only the permissions needed for
+ // your particular maintenance window tasks. For more information, see Setting
+ // up maintenance windows (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html)
+ // in the in the Amazon Web Services Systems Manager User Guide.
ServiceRoleArn *string `type:"string"`
// The targets (either managed nodes or maintenance window targets).
@@ -49613,6 +52308,11 @@ func (s *RegisterTaskWithMaintenanceWindowInput) Validate() error {
if s.WindowId != nil && len(*s.WindowId) < 20 {
invalidParams.Add(request.NewErrParamMinLen("WindowId", 20))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.LoggingInfo != nil {
if err := s.LoggingInfo.Validate(); err != nil {
invalidParams.AddNested("LoggingInfo", err.(request.ErrInvalidParams))
@@ -49640,6 +52340,12 @@ func (s *RegisterTaskWithMaintenanceWindowInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *RegisterTaskWithMaintenanceWindowInput) SetAlarmConfiguration(v *AlarmConfiguration) *RegisterTaskWithMaintenanceWindowInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetClientToken sets the ClientToken field's value.
func (s *RegisterTaskWithMaintenanceWindowInput) SetClientToken(v string) *RegisterTaskWithMaintenanceWindowInput {
s.ClientToken = &v
@@ -50004,6 +52710,8 @@ type ResetServiceSettingInput struct {
// The Amazon Resource Name (ARN) of the service setting to reset. The setting
// ID can be one of the following.
//
+ // * /ssm/managed-instance/default-ec2-instance-management-role
+ //
// * /ssm/automation/customer-script-log-destination
//
// * /ssm/automation/customer-script-log-group-name
@@ -50982,7 +53690,8 @@ type ResourceDataSyncSource struct {
// options, then Systems Manager automatically enables all OpsData sources in
// the selected Amazon Web Services Regions for all Amazon Web Services accounts
// in your organization (or in the selected organization units). For more information,
- // see About multiple account and Region resource data syncs (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resouce-data-sync-multiple-accounts-and-regions.html)
+ // see Setting up Systems Manager Explorer to display data from multiple accounts
+ // and Regions (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html)
// in the Amazon Web Services Systems Manager User Guide.
EnableAllOpsDataSources *bool `type:"boolean"`
@@ -51100,7 +53809,8 @@ type ResourceDataSyncSourceWithState struct {
// options, then Systems Manager automatically enables all OpsData sources in
// the selected Amazon Web Services Regions for all Amazon Web Services accounts
// in your organization (or in the selected organization units). For more information,
- // see About multiple account and Region resource data syncs (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resouce-data-sync-multiple-accounts-and-regions.html)
+ // see Setting up Systems Manager Explorer to display data from multiple accounts
+ // and Regions (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html)
// in the Amazon Web Services Systems Manager User Guide.
EnableAllOpsDataSources *bool `type:"boolean"`
@@ -51320,6 +54030,337 @@ func (s *ResourceLimitExceededException) RequestID() string {
return s.RespMetadata.RequestID
}
+// The specified parameter to be shared could not be found.
+type ResourceNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+ return &ResourceNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+ return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourceNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The hash provided in the call doesn't match the stored hash. This exception
+// is thrown when trying to update an obsolete policy version or when multiple
+// requests to update a policy are sent.
+type ResourcePolicyConflictException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyConflictException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyConflictException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourcePolicyConflictException(v protocol.ResponseMetadata) error {
+ return &ResourcePolicyConflictException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourcePolicyConflictException) Code() string {
+ return "ResourcePolicyConflictException"
+}
+
+// Message returns the exception's message.
+func (s *ResourcePolicyConflictException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourcePolicyConflictException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourcePolicyConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourcePolicyConflictException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourcePolicyConflictException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// One or more parameters specified for the call aren't valid. Verify the parameters
+// and their values and try again.
+type ResourcePolicyInvalidParameterException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+
+ ParameterNames []*string `type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyInvalidParameterException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyInvalidParameterException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourcePolicyInvalidParameterException(v protocol.ResponseMetadata) error {
+ return &ResourcePolicyInvalidParameterException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourcePolicyInvalidParameterException) Code() string {
+ return "ResourcePolicyInvalidParameterException"
+}
+
+// Message returns the exception's message.
+func (s *ResourcePolicyInvalidParameterException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourcePolicyInvalidParameterException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourcePolicyInvalidParameterException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourcePolicyInvalidParameterException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourcePolicyInvalidParameterException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The PutResourcePolicy API action enforces two limits. A policy can't be greater
+// than 1024 bytes in size. And only one policy can be attached to OpsItemGroup.
+// Verify these limits and try again.
+type ResourcePolicyLimitExceededException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Limit *int64 `type:"integer"`
+
+ LimitType *string `type:"string"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyLimitExceededException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyLimitExceededException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourcePolicyLimitExceededException(v protocol.ResponseMetadata) error {
+ return &ResourcePolicyLimitExceededException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourcePolicyLimitExceededException) Code() string {
+ return "ResourcePolicyLimitExceededException"
+}
+
+// Message returns the exception's message.
+func (s *ResourcePolicyLimitExceededException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourcePolicyLimitExceededException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourcePolicyLimitExceededException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourcePolicyLimitExceededException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourcePolicyLimitExceededException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// No policies with the specified policy ID and hash could be found.
+type ResourcePolicyNotFoundException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ Message_ *string `locationName:"Message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyNotFoundException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourcePolicyNotFoundException) GoString() string {
+ return s.String()
+}
+
+func newErrorResourcePolicyNotFoundException(v protocol.ResponseMetadata) error {
+ return &ResourcePolicyNotFoundException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ResourcePolicyNotFoundException) Code() string {
+ return "ResourcePolicyNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourcePolicyNotFoundException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourcePolicyNotFoundException) OrigErr() error {
+ return nil
+}
+
+func (s *ResourcePolicyNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ResourcePolicyNotFoundException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ResourcePolicyNotFoundException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
// The inventory item result attribute.
type ResultAttribute struct {
_ struct{} `type:"structure"`
@@ -51963,6 +55004,9 @@ func (s SendAutomationSignalOutput) GoString() string {
type SendCommandInput struct {
_ struct{} `type:"structure"`
+ // The CloudWatch alarm you want to apply to your command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// Enables Amazon Web Services Systems Manager to send Run Command output to
// Amazon CloudWatch Logs. Run Command is a capability of Amazon Web Services
// Systems Manager.
@@ -51985,8 +55029,8 @@ type SendCommandInput struct {
// The name of the Amazon Web Services Systems Manager document (SSM document)
// to run. This can be a public document or a custom document. To run a shared
// document belonging to another account, specify the document Amazon Resource
- // Name (ARN). For more information about how to use shared documents, see Using
- // shared SSM documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-using-shared.html)
+ // Name (ARN). For more information about how to use shared documents, see Sharing
+ // SSM documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-using-shared.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// If you specify a document name or ARN that hasn't been shared with your account,
@@ -52018,8 +55062,8 @@ type SendCommandInput struct {
// to send commands to, you can a send command to tens, hundreds, or thousands
// of nodes at once.
//
- // For more information about how to use targets, see Using targets and rate
- // controls to send commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html)
+ // For more information about how to use targets, see Run commands at scale
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html)
// in the Amazon Web Services Systems Manager User Guide.
InstanceIds []*string `type:"list"`
@@ -52079,8 +55123,8 @@ type SendCommandInput struct {
// To send a command to a smaller number of managed nodes, you can use the InstanceIds
// option instead.
//
- // For more information about how to use targets, see Sending commands to a
- // fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html)
+ // For more information about how to use targets, see Run commands at scale
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html)
// in the Amazon Web Services Systems Manager User Guide.
Targets []*Target `type:"list"`
@@ -52128,6 +55172,11 @@ func (s *SendCommandInput) Validate() error {
if s.TimeoutSeconds != nil && *s.TimeoutSeconds < 30 {
invalidParams.Add(request.NewErrParamMinValue("TimeoutSeconds", 30))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.CloudWatchOutputConfig != nil {
if err := s.CloudWatchOutputConfig.Validate(); err != nil {
invalidParams.AddNested("CloudWatchOutputConfig", err.(request.ErrInvalidParams))
@@ -52150,6 +55199,12 @@ func (s *SendCommandInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *SendCommandInput) SetAlarmConfiguration(v *AlarmConfiguration) *SendCommandInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCloudWatchOutputConfig sets the CloudWatchOutputConfig field's value.
func (s *SendCommandInput) SetCloudWatchOutputConfig(v *CloudWatchOutputConfig) *SendCommandInput {
s.CloudWatchOutputConfig = v
@@ -52473,7 +55528,7 @@ type Session struct {
// Reserved for future use.
OutputUrl *SessionManagerOutputUrl `type:"structure"`
- // The ID of the Amazon Web Services user account that started the session.
+ // The ID of the Amazon Web Services user that started the session.
Owner *string `min:"1" type:"string"`
// The reason for connecting to the instance.
@@ -52598,8 +55653,8 @@ type SessionFilter struct {
// * Target: Specify a managed node to which session connections have been
// made.
//
- // * Owner: Specify an Amazon Web Services user account to see a list of
- // sessions started by that user.
+ // * Owner: Specify an Amazon Web Services user to see a list of sessions
+ // started by that user.
//
// * Status: Specify a valid session status to see a list of all sessions
// with that status. Status values you can specify include: Connected Connecting
@@ -52865,6 +55920,9 @@ func (s StartAssociationsOnceOutput) GoString() string {
type StartAutomationExecutionInput struct {
_ struct{} `type:"structure"`
+ // The CloudWatch alarm you want to apply to your automation.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// User-provided idempotency token. The token must be unique, is case insensitive,
// enforces the UUID format, and can't be reused.
ClientToken *string `min:"36" type:"string"`
@@ -52872,7 +55930,7 @@ type StartAutomationExecutionInput struct {
// The name of the SSM document to run. This can be a public document or a custom
// document. To run a shared document belonging to another account, specify
// the document ARN. For more information about how to use shared documents,
- // see Using shared SSM documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-using-shared.html)
+ // see Sharing SSM documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/documents-ssm-sharing.html)
// in the Amazon Web Services Systems Manager User Guide.
//
// DocumentName is a required field
@@ -52986,6 +56044,11 @@ func (s *StartAutomationExecutionInput) Validate() error {
if s.TargetParameterName != nil && len(*s.TargetParameterName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TargetParameterName", 1))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@@ -53023,6 +56086,12 @@ func (s *StartAutomationExecutionInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *StartAutomationExecutionInput) SetAlarmConfiguration(v *AlarmConfiguration) *StartAutomationExecutionInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetClientToken sets the ClientToken field's value.
func (s *StartAutomationExecutionInput) SetClientToken(v string) *StartAutomationExecutionInput {
s.ClientToken = &v
@@ -53628,6 +56697,9 @@ type StepExecution struct {
// A user-specified list of parameters to override when running a step.
OverriddenParameters map[string][]*string `min:"1" type:"map"`
+ // Information about the parent step.
+ ParentStepDetails *ParentStepDetails `type:"structure"`
+
// A message associated with the response code for an execution.
Response *string `type:"string"`
@@ -53653,6 +56725,9 @@ type StepExecution struct {
// The timeout seconds of the step.
TimeoutSeconds *int64 `type:"long"`
+ // The CloudWatch alarms that were invoked by the automation.
+ TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
+
// Strategies used when step fails, we support Continue and Abort. Abort will
// fail the automation when the step fails. Continue will ignore the failure
// of current step and allow automation to run the next step. With conditional
@@ -53757,6 +56832,12 @@ func (s *StepExecution) SetOverriddenParameters(v map[string][]*string) *StepExe
return s
}
+// SetParentStepDetails sets the ParentStepDetails field's value.
+func (s *StepExecution) SetParentStepDetails(v *ParentStepDetails) *StepExecution {
+ s.ParentStepDetails = v
+ return s
+}
+
// SetResponse sets the Response field's value.
func (s *StepExecution) SetResponse(v string) *StepExecution {
s.Response = &v
@@ -53805,6 +56886,12 @@ func (s *StepExecution) SetTimeoutSeconds(v int64) *StepExecution {
return s
}
+// SetTriggeredAlarms sets the TriggeredAlarms field's value.
+func (s *StepExecution) SetTriggeredAlarms(v []*AlarmStateInformation) *StepExecution {
+ s.TriggeredAlarms = v
+ return s
+}
+
// SetValidNextSteps sets the ValidNextSteps field's value.
func (s *StepExecution) SetValidNextSteps(v []*string) *StepExecution {
s.ValidNextSteps = v
@@ -53816,9 +56903,7 @@ func (s *StepExecution) SetValidNextSteps(v []*string) *StepExecution {
type StepExecutionFilter struct {
_ struct{} `type:"structure"`
- // One or more keys to limit the results. Valid filter keys include the following:
- // StepName, Action, StepExecutionId, StepExecutionStatus, StartTimeBefore,
- // StartTimeAfter.
+ // One or more keys to limit the results.
//
// Key is a required field
Key *string `type:"string" required:"true" enum:"StepExecutionFilterKey"`
@@ -54039,7 +57124,7 @@ type Tag struct {
// The value of the tag.
//
// Value is a required field
- Value *string `min:"1" type:"string" required:"true"`
+ Value *string `type:"string" required:"true"`
}
// String returns the string representation.
@@ -54072,9 +57157,6 @@ func (s *Tag) Validate() error {
if s.Value == nil {
invalidParams.Add(request.NewErrParamRequired("Value"))
}
- if s.Value != nil && len(*s.Value) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Value", 1))
- }
if invalidParams.Len() > 0 {
return invalidParams
@@ -54106,42 +57188,42 @@ func (s *Tag) SetValue(v string) *Tag {
//
// Supported formats include the following.
//
-// - Key=InstanceIds,Values=,,
+// For all Systems Manager capabilities:
+//
+// - Key=tag-key,Values=tag-value-1,tag-value-2
+//
+// For Automation and Change Manager:
//
-// - Key=tag:,Values=,
+// - Key=tag:tag-key,Values=tag-value
//
-// - Key=tag-key,Values=,
+// - Key=ResourceGroup,Values=resource-group-name
//
-// - Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=
+// - Key=ParameterValues,Values=value-1,value-2,value-3
//
-// - Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=,
+// - To target all instances in the Amazon Web Services Region: Key=AWS::EC2::Instance,Values=*
+// Key=InstanceIds,Values=*
//
-// - Automation targets only: Key=ResourceGroup;Values=
+// For Run Command and Maintenance Windows:
//
-// For example:
+// - Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
//
-// - Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE
+// - Key=tag:tag-key,Values=tag-value-1,tag-value-2
//
-// - Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3
+// - Key=resource-groups:Name,Values=resource-group-name
//
-// - Key=tag-key,Values=Name,Instance-Type,CostCenter
+// - Additionally, Maintenance Windows support targeting resource types:
+// Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
//
-// - Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup
-// This example demonstrates how to target all resources in the resource
-// group ProductionResourceGroup in your maintenance window.
+// For State Manager:
//
-// - Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
-// This example demonstrates how to target only Amazon Elastic Compute Cloud
-// (Amazon EC2) instances and VPCs in your maintenance window.
+// - Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
//
-// - Automation targets only: Key=ResourceGroup,Values=MyResourceGroup
+// - Key=tag:tag-key,Values=tag-value-1,tag-value-2
//
-// - State Manager association targets only: Key=InstanceIds,Values=* This
-// example demonstrates how to target all managed instances in the Amazon
-// Web Services Region where the association was created.
+// - To target all instances in the Amazon Web Services Region: Key=InstanceIds,Values=*
//
// For more information about how to send commands that target managed nodes
-// using Key,Value parameters, see Targeting multiple instances (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting)
+// using Key,Value parameters, see Targeting multiple managed nodes (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting)
// in the Amazon Web Services Systems Manager User Guide.
type Target struct {
_ struct{} `type:"structure"`
@@ -54282,6 +57364,10 @@ type TargetLocation struct {
// The Amazon Web Services Regions targeted by the current Automation execution.
Regions []*string `min:"1" type:"list"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ TargetLocationAlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The maximum number of Amazon Web Services Regions and Amazon Web Services
// accounts allowed to run the Automation concurrently.
TargetLocationMaxConcurrency *string `min:"1" type:"string"`
@@ -54327,6 +57413,11 @@ func (s *TargetLocation) Validate() error {
if s.TargetLocationMaxErrors != nil && len(*s.TargetLocationMaxErrors) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TargetLocationMaxErrors", 1))
}
+ if s.TargetLocationAlarmConfiguration != nil {
+ if err := s.TargetLocationAlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("TargetLocationAlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -54352,6 +57443,12 @@ func (s *TargetLocation) SetRegions(v []*string) *TargetLocation {
return s
}
+// SetTargetLocationAlarmConfiguration sets the TargetLocationAlarmConfiguration field's value.
+func (s *TargetLocation) SetTargetLocationAlarmConfiguration(v *AlarmConfiguration) *TargetLocation {
+ s.TargetLocationAlarmConfiguration = v
+ return s
+}
+
// SetTargetLocationMaxConcurrency sets the TargetLocationMaxConcurrency field's value.
func (s *TargetLocation) SetTargetLocationMaxConcurrency(v string) *TargetLocation {
s.TargetLocationMaxConcurrency = &v
@@ -54717,6 +57814,9 @@ type UnlabelParameterVersionInput struct {
// The name of the parameter from which you want to delete one or more labels.
//
+ // You can't enter the Amazon Resource Name (ARN) for a parameter, only the
+ // parameter name itself.
+ //
// Name is a required field
Name *string `min:"1" type:"string" required:"true"`
@@ -54895,7 +57995,7 @@ func (s *UnsupportedCalendarException) RequestID() string {
// Patching for applications released by Microsoft is only available on EC2
// instances and advanced instances. To patch applications released by Microsoft
// on on-premises servers and VMs, you must enable advanced instances. For more
-// information, see Enabling the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
+// information, see Turning on the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
// in the Amazon Web Services Systems Manager User Guide.
type UnsupportedFeatureRequiredException struct {
_ struct{} `type:"structure"`
@@ -55224,8 +58324,8 @@ func (s *UnsupportedParameterType) RequestID() string {
}
// The document doesn't support the platform type of the given managed node
-// ID(s). For example, you sent an document for a Windows managed node to a
-// Linux node.
+// IDs. For example, you sent an document for a Windows managed node to a Linux
+// node.
type UnsupportedPlatformType struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -55292,6 +58392,10 @@ func (s *UnsupportedPlatformType) RequestID() string {
type UpdateAssociationInput struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you want to apply to an automation or
+ // command.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// By default, when you update an association, the system runs it immediately
// after it is updated and then according to the schedule you specified. Specify
// this option if you don't want an association to run immediately after you
@@ -55351,6 +58455,22 @@ type UpdateAssociationInput struct {
// form another account, you must set the document version to default.
DocumentVersion *string `type:"string"`
+ // The number of hours the association can run before it is canceled. Duration
+ // applies to associations that are currently running, and any pending and in
+ // progress commands on all targets. If a target was taken offline for the association
+ // to run, it is made available again immediately, without a reboot.
+ //
+ // The Duration parameter applies only when both these conditions are true:
+ //
+ // * The association for which you specify a duration is cancelable according
+ // to the parameters of the SSM command document or Automation runbook associated
+ // with this execution.
+ //
+ // * The command specifies the ApplyOnlyAtCronInterval (https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_UpdateAssociation.html#systemsmanager-UpdateAssociation-request-ApplyOnlyAtCronInterval)
+ // parameter, which means that the association doesn't run immediately after
+ // it is updated, but only according to the specified schedule.
+ Duration *int64 `min:"1" type:"integer"`
+
// The maximum number of targets allowed to run the association at the same
// time. You can specify a number, for example 10, or a percentage of the target
// set, for example 10%. The default value is 100%, which means all targets
@@ -55479,6 +58599,9 @@ func (s *UpdateAssociationInput) Validate() error {
if s.AutomationTargetParameterName != nil && len(*s.AutomationTargetParameterName) < 1 {
invalidParams.Add(request.NewErrParamMinLen("AutomationTargetParameterName", 1))
}
+ if s.Duration != nil && *s.Duration < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Duration", 1))
+ }
if s.MaxConcurrency != nil && len(*s.MaxConcurrency) < 1 {
invalidParams.Add(request.NewErrParamMinLen("MaxConcurrency", 1))
}
@@ -55494,6 +58617,11 @@ func (s *UpdateAssociationInput) Validate() error {
if s.TargetLocations != nil && len(s.TargetLocations) < 1 {
invalidParams.Add(request.NewErrParamMinLen("TargetLocations", 1))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.OutputLocation != nil {
if err := s.OutputLocation.Validate(); err != nil {
invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams))
@@ -55526,6 +58654,12 @@ func (s *UpdateAssociationInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *UpdateAssociationInput) SetAlarmConfiguration(v *AlarmConfiguration) *UpdateAssociationInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value.
func (s *UpdateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *UpdateAssociationInput {
s.ApplyOnlyAtCronInterval = &v
@@ -55574,6 +58708,12 @@ func (s *UpdateAssociationInput) SetDocumentVersion(v string) *UpdateAssociation
return s
}
+// SetDuration sets the Duration field's value.
+func (s *UpdateAssociationInput) SetDuration(v int64) *UpdateAssociationInput {
+ s.Duration = &v
+ return s
+}
+
// SetMaxConcurrency sets the MaxConcurrency field's value.
func (s *UpdateAssociationInput) SetMaxConcurrency(v string) *UpdateAssociationInput {
s.MaxConcurrency = &v
@@ -55912,8 +59052,8 @@ type UpdateDocumentInput struct {
TargetType *string `type:"string"`
// An optional field specifying the version of the artifact you are updating
- // with the document. For example, "Release 12, Update 6". This value is unique
- // across all versions of a document, and can't be changed.
+ // with the document. For example, 12.6. This value is unique across all versions
+ // of a document, and can't be changed.
VersionName *string `type:"string"`
}
@@ -56200,6 +59340,9 @@ type UpdateMaintenanceWindowInput struct {
// The date and time, in ISO-8601 Extended format, for when you want the maintenance
// window to become active. StartDate allows you to delay activation of the
// maintenance window until the specified future date.
+ //
+ // When using a rate schedule, if you provide a start date that occurs in the
+ // past, the current date and time are used as the start date.
StartDate *string `type:"string"`
// The ID of the maintenance window to update.
@@ -56707,6 +59850,9 @@ func (s *UpdateMaintenanceWindowTargetOutput) SetWindowTargetId(v string) *Updat
type UpdateMaintenanceWindowTaskInput struct {
_ struct{} `type:"structure"`
+ // The CloudWatch alarm you want to apply to your maintenance window task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// Indicates whether tasks should continue to run after the cutoff time specified
// in the maintenance windows is reached.
//
@@ -56776,17 +59922,16 @@ type UpdateMaintenanceWindowTaskInput struct {
// The Amazon Resource Name (ARN) of the IAM service role for Amazon Web Services
// Systems Manager to assume when running a maintenance window task. If you
- // do not specify a service role ARN, Systems Manager uses your account's service-linked
- // role. If no service-linked role for Systems Manager exists in your account,
- // it is created when you run RegisterTaskWithMaintenanceWindow.
- //
- // For more information, see the following topics in the in the Amazon Web Services
- // Systems Manager User Guide:
- //
- // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions)
- //
- // * Should I use a service-linked role or a custom service role to run maintenance
- // window tasks? (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role)
+ // do not specify a service role ARN, Systems Manager uses a service-linked
+ // role in your account. If no appropriate service-linked role for Systems Manager
+ // exists in your account, it is created when you run RegisterTaskWithMaintenanceWindow.
+ //
+ // However, for an improved security posture, we strongly recommend creating
+ // a custom policy and custom service role for running your maintenance window
+ // tasks. The policy can be crafted to provide only the permissions needed for
+ // your particular maintenance window tasks. For more information, see Setting
+ // up maintenance windows (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html)
+ // in the in the Amazon Web Services Systems Manager User Guide.
ServiceRoleArn *string `type:"string"`
// The targets (either managed nodes or tags) to modify. Managed nodes are specified
@@ -56893,6 +60038,11 @@ func (s *UpdateMaintenanceWindowTaskInput) Validate() error {
if s.WindowTaskId != nil && len(*s.WindowTaskId) < 36 {
invalidParams.Add(request.NewErrParamMinLen("WindowTaskId", 36))
}
+ if s.AlarmConfiguration != nil {
+ if err := s.AlarmConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AlarmConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
if s.LoggingInfo != nil {
if err := s.LoggingInfo.Validate(); err != nil {
invalidParams.AddNested("LoggingInfo", err.(request.ErrInvalidParams))
@@ -56920,6 +60070,12 @@ func (s *UpdateMaintenanceWindowTaskInput) Validate() error {
return nil
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *UpdateMaintenanceWindowTaskInput) SetAlarmConfiguration(v *AlarmConfiguration) *UpdateMaintenanceWindowTaskInput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCutoffBehavior sets the CutoffBehavior field's value.
func (s *UpdateMaintenanceWindowTaskInput) SetCutoffBehavior(v string) *UpdateMaintenanceWindowTaskInput {
s.CutoffBehavior = &v
@@ -57013,6 +60169,10 @@ func (s *UpdateMaintenanceWindowTaskInput) SetWindowTaskId(v string) *UpdateMain
type UpdateMaintenanceWindowTaskOutput struct {
_ struct{} `type:"structure"`
+ // The details for the CloudWatch alarm you applied to your maintenance window
+ // task.
+ AlarmConfiguration *AlarmConfiguration `type:"structure"`
+
// The specification for whether tasks should continue to run after the cutoff
// time specified in the maintenance windows is reached.
CutoffBehavior *string `type:"string" enum:"MaintenanceWindowTaskCutoffBehavior"`
@@ -57096,6 +60256,12 @@ func (s UpdateMaintenanceWindowTaskOutput) GoString() string {
return s.String()
}
+// SetAlarmConfiguration sets the AlarmConfiguration field's value.
+func (s *UpdateMaintenanceWindowTaskOutput) SetAlarmConfiguration(v *AlarmConfiguration) *UpdateMaintenanceWindowTaskOutput {
+ s.AlarmConfiguration = v
+ return s
+}
+
// SetCutoffBehavior sets the CutoffBehavior field's value.
func (s *UpdateMaintenanceWindowTaskOutput) SetCutoffBehavior(v string) *UpdateMaintenanceWindowTaskOutput {
s.CutoffBehavior = &v
@@ -57183,7 +60349,15 @@ func (s *UpdateMaintenanceWindowTaskOutput) SetWindowTaskId(v string) *UpdateMai
type UpdateManagedInstanceRoleInput struct {
_ struct{} `type:"structure"`
- // The IAM role you want to assign or change.
+ // The name of the Identity and Access Management (IAM) role that you want to
+ // assign to the managed node. This IAM role must provide AssumeRole permissions
+ // for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com.
+ // For more information, see Create an IAM service role for a hybrid and multicloud
+ // environment (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html)
+ // in the Amazon Web Services Systems Manager User Guide.
+ //
+ // You can't specify an IAM service-linked role for this parameter. You must
+ // create a unique role.
//
// IamRole is a required field
IamRole *string `type:"string" required:"true"`
@@ -57279,8 +60453,8 @@ type UpdateOpsItemInput struct {
// Specify a new category for an OpsItem.
Category *string `min:"1" type:"string"`
- // Update the information about the OpsItem. Provide enough information so that
- // users reading this OpsItem for the first time understand the issue.
+ // User-defined text that contains information about the OpsItem, in Markdown
+ // format.
Description *string `min:"1" type:"string"`
// The Amazon Resource Name (ARN) of an SNS topic where notifications are sent
@@ -57309,13 +60483,16 @@ type UpdateOpsItemInput struct {
// Use the /aws/resources key in OperationalData to specify a related resource
// in the request. Use the /aws/automations key in OperationalData to associate
// an Automation runbook with the OpsItem. To view Amazon Web Services CLI example
- // commands that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems)
+ // commands that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-manually-create-OpsItems.html)
// in the Amazon Web Services Systems Manager User Guide.
OperationalData map[string]*OpsItemDataValue `type:"map"`
// Keys that you want to remove from the OperationalData map.
OperationalDataToDelete []*string `type:"list"`
+ // The OpsItem Amazon Resource Name (ARN).
+ OpsItemArn *string `min:"20" type:"string"`
+
// The ID of the OpsItem.
//
// OpsItemId is a required field
@@ -57341,7 +60518,7 @@ type UpdateOpsItemInput struct {
Severity *string `min:"1" type:"string"`
// The OpsItem status. Status can be Open, In Progress, or Resolved. For more
- // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems.html#OpsCenter-working-with-OpsItems-editing-details)
+ // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html)
// in the Amazon Web Services Systems Manager User Guide.
Status *string `type:"string" enum:"OpsItemStatus"`
@@ -57377,6 +60554,9 @@ func (s *UpdateOpsItemInput) Validate() error {
if s.Description != nil && len(*s.Description) < 1 {
invalidParams.Add(request.NewErrParamMinLen("Description", 1))
}
+ if s.OpsItemArn != nil && len(*s.OpsItemArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("OpsItemArn", 20))
+ }
if s.OpsItemId == nil {
invalidParams.Add(request.NewErrParamRequired("OpsItemId"))
}
@@ -57448,6 +60628,12 @@ func (s *UpdateOpsItemInput) SetOperationalDataToDelete(v []*string) *UpdateOpsI
return s
}
+// SetOpsItemArn sets the OpsItemArn field's value.
+func (s *UpdateOpsItemInput) SetOpsItemArn(v string) *UpdateOpsItemInput {
+ s.OpsItemArn = &v
+ return s
+}
+
// SetOpsItemId sets the OpsItemId field's value.
func (s *UpdateOpsItemInput) SetOpsItemId(v string) *UpdateOpsItemInput {
s.OpsItemId = &v
@@ -57684,11 +60870,11 @@ type UpdatePatchBaselineInput struct {
// with the patch baseline, and its status is reported as InstalledOther.
// This is the default action if no option is specified.
//
- // * BLOCK : Packages in the RejectedPatches list, and packages that include
- // them as dependencies, aren't installed under any circumstances. If a package
- // was installed before it was added to the Rejected patches list, it is
- // considered non-compliant with the patch baseline, and its status is reported
- // as InstalledRejected.
+ // * BLOCK: Packages in the Rejected patches list, and packages that include
+ // them as dependencies, aren't installed by Patch Manager under any circumstances.
+ // If a package was installed before it was added to the Rejected patches
+ // list, or is installed outside of Patch Manager afterward, it's considered
+ // noncompliant with the patch baseline and its status is reported as InstalledRejected.
RejectedPatchesAction *string `type:"string" enum:"PatchAction"`
// If True, then all fields that are required by the CreatePatchBaseline operation
@@ -58100,10 +61286,12 @@ func (s UpdateResourceDataSyncOutput) GoString() string {
type UpdateServiceSettingInput struct {
_ struct{} `type:"structure"`
- // The Amazon Resource Name (ARN) of the service setting to reset. For example,
+ // The Amazon Resource Name (ARN) of the service setting to update. For example,
// arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled.
// The setting ID can be one of the following.
//
+ // * /ssm/managed-instance/default-ec2-instance-management-role
+ //
// * /ssm/automation/customer-script-log-destination
//
// * /ssm/automation/customer-script-log-group-name
@@ -58118,26 +61306,36 @@ type UpdateServiceSettingInput struct {
//
// * /ssm/parameter-store/high-throughput-enabled
//
+ // Permissions to update the /ssm/managed-instance/default-ec2-instance-management-role
+ // setting should only be provided to administrators. Implement least privilege
+ // access when allowing individuals to configure or modify the Default Host
+ // Management Configuration.
+ //
// SettingId is a required field
SettingId *string `min:"1" type:"string" required:"true"`
// The new value to specify for the service setting. The following list specifies
// the available values for each setting.
//
- // * /ssm/automation/customer-script-log-destination: CloudWatch
+ // * For /ssm/managed-instance/default-ec2-instance-management-role, enter
+ // the name of an IAM role.
+ //
+ // * For /ssm/automation/customer-script-log-destination, enter CloudWatch.
//
- // * /ssm/automation/customer-script-log-group-name: the name of an Amazon
- // CloudWatch Logs log group
+ // * For /ssm/automation/customer-script-log-group-name, enter the name of
+ // an Amazon CloudWatch Logs log group.
//
- // * /ssm/documents/console/public-sharing-permission: Enable or Disable
+ // * For /ssm/documents/console/public-sharing-permission, enter Enable or
+ // Disable.
//
- // * /ssm/managed-instance/activation-tier: standard or advanced
+ // * For /ssm/managed-instance/activation-tier, enter standard or advanced.
//
- // * /ssm/opsinsights/opscenter: Enabled or Disabled
+ // * For /ssm/opsinsights/opscenter, enter Enabled or Disabled.
//
- // * /ssm/parameter-store/default-parameter-tier: Standard, Advanced, Intelligent-Tiering
+ // * For /ssm/parameter-store/default-parameter-tier, enter Standard, Advanced,
+ // or Intelligent-Tiering
//
- // * /ssm/parameter-store/high-throughput-enabled: true or false
+ // * For /ssm/parameter-store/high-throughput-enabled, enter true or false.
//
// SettingValue is a required field
SettingValue *string `min:"1" type:"string" required:"true"`
@@ -58524,6 +61722,9 @@ const (
// AutomationExecutionStatusCompletedWithFailure is a AutomationExecutionStatus enum value
AutomationExecutionStatusCompletedWithFailure = "CompletedWithFailure"
+
+ // AutomationExecutionStatusExited is a AutomationExecutionStatus enum value
+ AutomationExecutionStatusExited = "Exited"
)
// AutomationExecutionStatus_Values returns all elements of the AutomationExecutionStatus enum
@@ -58547,6 +61748,7 @@ func AutomationExecutionStatus_Values() []string {
AutomationExecutionStatusChangeCalendarOverrideRejected,
AutomationExecutionStatusCompletedWithSuccess,
AutomationExecutionStatusCompletedWithFailure,
+ AutomationExecutionStatusExited,
}
}
@@ -58824,17 +62026,17 @@ func ComplianceUploadType_Values() []string {
const (
// ConnectionStatusConnected is a ConnectionStatus enum value
- ConnectionStatusConnected = "Connected"
+ ConnectionStatusConnected = "connected"
- // ConnectionStatusNotConnected is a ConnectionStatus enum value
- ConnectionStatusNotConnected = "NotConnected"
+ // ConnectionStatusNotconnected is a ConnectionStatus enum value
+ ConnectionStatusNotconnected = "notconnected"
)
// ConnectionStatus_Values returns all elements of the ConnectionStatus enum
func ConnectionStatus_Values() []string {
return []string{
ConnectionStatusConnected,
- ConnectionStatusNotConnected,
+ ConnectionStatusNotconnected,
}
}
@@ -59059,6 +62261,15 @@ const (
// DocumentTypeProblemAnalysisTemplate is a DocumentType enum value
DocumentTypeProblemAnalysisTemplate = "ProblemAnalysisTemplate"
+
+ // DocumentTypeCloudFormation is a DocumentType enum value
+ DocumentTypeCloudFormation = "CloudFormation"
+
+ // DocumentTypeConformancePackTemplate is a DocumentType enum value
+ DocumentTypeConformancePackTemplate = "ConformancePackTemplate"
+
+ // DocumentTypeQuickSetup is a DocumentType enum value
+ DocumentTypeQuickSetup = "QuickSetup"
)
// DocumentType_Values returns all elements of the DocumentType enum
@@ -59076,6 +62287,9 @@ func DocumentType_Values() []string {
DocumentTypeAutomationChangeTemplate,
DocumentTypeProblemAnalysis,
DocumentTypeProblemAnalysisTemplate,
+ DocumentTypeCloudFormation,
+ DocumentTypeConformancePackTemplate,
+ DocumentTypeQuickSetup,
}
}
@@ -59095,6 +62309,22 @@ func ExecutionMode_Values() []string {
}
}
+const (
+ // ExternalAlarmStateUnknown is a ExternalAlarmState enum value
+ ExternalAlarmStateUnknown = "UNKNOWN"
+
+ // ExternalAlarmStateAlarm is a ExternalAlarmState enum value
+ ExternalAlarmStateAlarm = "ALARM"
+)
+
+// ExternalAlarmState_Values returns all elements of the ExternalAlarmState enum
+func ExternalAlarmState_Values() []string {
+ return []string{
+ ExternalAlarmStateUnknown,
+ ExternalAlarmStateAlarm,
+ }
+}
+
const (
// FaultClient is a Fault enum value
FaultClient = "Client"
@@ -59179,6 +62409,78 @@ func InstancePatchStateOperatorType_Values() []string {
}
}
+const (
+ // InstancePropertyFilterKeyInstanceIds is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyInstanceIds = "InstanceIds"
+
+ // InstancePropertyFilterKeyAgentVersion is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyAgentVersion = "AgentVersion"
+
+ // InstancePropertyFilterKeyPingStatus is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyPingStatus = "PingStatus"
+
+ // InstancePropertyFilterKeyPlatformTypes is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyPlatformTypes = "PlatformTypes"
+
+ // InstancePropertyFilterKeyDocumentName is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyDocumentName = "DocumentName"
+
+ // InstancePropertyFilterKeyActivationIds is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyActivationIds = "ActivationIds"
+
+ // InstancePropertyFilterKeyIamRole is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyIamRole = "IamRole"
+
+ // InstancePropertyFilterKeyResourceType is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyResourceType = "ResourceType"
+
+ // InstancePropertyFilterKeyAssociationStatus is a InstancePropertyFilterKey enum value
+ InstancePropertyFilterKeyAssociationStatus = "AssociationStatus"
+)
+
+// InstancePropertyFilterKey_Values returns all elements of the InstancePropertyFilterKey enum
+func InstancePropertyFilterKey_Values() []string {
+ return []string{
+ InstancePropertyFilterKeyInstanceIds,
+ InstancePropertyFilterKeyAgentVersion,
+ InstancePropertyFilterKeyPingStatus,
+ InstancePropertyFilterKeyPlatformTypes,
+ InstancePropertyFilterKeyDocumentName,
+ InstancePropertyFilterKeyActivationIds,
+ InstancePropertyFilterKeyIamRole,
+ InstancePropertyFilterKeyResourceType,
+ InstancePropertyFilterKeyAssociationStatus,
+ }
+}
+
+const (
+ // InstancePropertyFilterOperatorEqual is a InstancePropertyFilterOperator enum value
+ InstancePropertyFilterOperatorEqual = "Equal"
+
+ // InstancePropertyFilterOperatorNotEqual is a InstancePropertyFilterOperator enum value
+ InstancePropertyFilterOperatorNotEqual = "NotEqual"
+
+ // InstancePropertyFilterOperatorBeginWith is a InstancePropertyFilterOperator enum value
+ InstancePropertyFilterOperatorBeginWith = "BeginWith"
+
+ // InstancePropertyFilterOperatorLessThan is a InstancePropertyFilterOperator enum value
+ InstancePropertyFilterOperatorLessThan = "LessThan"
+
+ // InstancePropertyFilterOperatorGreaterThan is a InstancePropertyFilterOperator enum value
+ InstancePropertyFilterOperatorGreaterThan = "GreaterThan"
+)
+
+// InstancePropertyFilterOperator_Values returns all elements of the InstancePropertyFilterOperator enum
+func InstancePropertyFilterOperator_Values() []string {
+ return []string{
+ InstancePropertyFilterOperatorEqual,
+ InstancePropertyFilterOperatorNotEqual,
+ InstancePropertyFilterOperatorBeginWith,
+ InstancePropertyFilterOperatorLessThan,
+ InstancePropertyFilterOperatorGreaterThan,
+ }
+}
+
const (
// InventoryAttributeDataTypeString is a InventoryAttributeDataType enum value
InventoryAttributeDataTypeString = "string"
@@ -59433,6 +62735,9 @@ const (
// OperatingSystemAmazonLinux2 is a OperatingSystem enum value
OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2"
+ // OperatingSystemAmazonLinux2022 is a OperatingSystem enum value
+ OperatingSystemAmazonLinux2022 = "AMAZON_LINUX_2022"
+
// OperatingSystemUbuntu is a OperatingSystem enum value
OperatingSystemUbuntu = "UBUNTU"
@@ -59459,6 +62764,12 @@ const (
// OperatingSystemRockyLinux is a OperatingSystem enum value
OperatingSystemRockyLinux = "ROCKY_LINUX"
+
+ // OperatingSystemAlmaLinux is a OperatingSystem enum value
+ OperatingSystemAlmaLinux = "ALMA_LINUX"
+
+ // OperatingSystemAmazonLinux2023 is a OperatingSystem enum value
+ OperatingSystemAmazonLinux2023 = "AMAZON_LINUX_2023"
)
// OperatingSystem_Values returns all elements of the OperatingSystem enum
@@ -59467,6 +62778,7 @@ func OperatingSystem_Values() []string {
OperatingSystemWindows,
OperatingSystemAmazonLinux,
OperatingSystemAmazonLinux2,
+ OperatingSystemAmazonLinux2022,
OperatingSystemUbuntu,
OperatingSystemRedhatEnterpriseLinux,
OperatingSystemSuse,
@@ -59476,6 +62788,8 @@ func OperatingSystem_Values() []string {
OperatingSystemMacos,
OperatingSystemRaspbian,
OperatingSystemRockyLinux,
+ OperatingSystemAlmaLinux,
+ OperatingSystemAmazonLinux2023,
}
}
@@ -59632,6 +62946,9 @@ const (
// OpsItemFilterKeyInsightByType is a OpsItemFilterKey enum value
OpsItemFilterKeyInsightByType = "InsightByType"
+
+ // OpsItemFilterKeyAccountId is a OpsItemFilterKey enum value
+ OpsItemFilterKeyAccountId = "AccountId"
)
// OpsItemFilterKey_Values returns all elements of the OpsItemFilterKey enum
@@ -59664,6 +62981,7 @@ func OpsItemFilterKey_Values() []string {
OpsItemFilterKeyChangeRequestByTemplate,
OpsItemFilterKeyChangeRequestByTargetsResourceGroup,
OpsItemFilterKeyInsightByType,
+ OpsItemFilterKeyAccountId,
}
}
@@ -60195,9 +63513,6 @@ const (
// ResourceTypeManagedInstance is a ResourceType enum value
ResourceTypeManagedInstance = "ManagedInstance"
- // ResourceTypeDocument is a ResourceType enum value
- ResourceTypeDocument = "Document"
-
// ResourceTypeEc2instance is a ResourceType enum value
ResourceTypeEc2instance = "EC2Instance"
)
@@ -60206,7 +63521,6 @@ const (
func ResourceType_Values() []string {
return []string{
ResourceTypeManagedInstance,
- ResourceTypeDocument,
ResourceTypeEc2instance,
}
}
@@ -60425,6 +63739,15 @@ const (
// StepExecutionFilterKeyAction is a StepExecutionFilterKey enum value
StepExecutionFilterKeyAction = "Action"
+
+ // StepExecutionFilterKeyParentStepExecutionId is a StepExecutionFilterKey enum value
+ StepExecutionFilterKeyParentStepExecutionId = "ParentStepExecutionId"
+
+ // StepExecutionFilterKeyParentStepIteration is a StepExecutionFilterKey enum value
+ StepExecutionFilterKeyParentStepIteration = "ParentStepIteration"
+
+ // StepExecutionFilterKeyParentStepIteratorValue is a StepExecutionFilterKey enum value
+ StepExecutionFilterKeyParentStepIteratorValue = "ParentStepIteratorValue"
)
// StepExecutionFilterKey_Values returns all elements of the StepExecutionFilterKey enum
@@ -60436,6 +63759,9 @@ func StepExecutionFilterKey_Values() []string {
StepExecutionFilterKeyStepExecutionId,
StepExecutionFilterKeyStepName,
StepExecutionFilterKeyAction,
+ StepExecutionFilterKeyParentStepExecutionId,
+ StepExecutionFilterKeyParentStepIteration,
+ StepExecutionFilterKeyParentStepIteratorValue,
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go
index c6d086705..a527ef256 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go
@@ -3,33 +3,32 @@
// Package ssm provides the client and types for making API
// requests to Amazon Simple Systems Manager (SSM).
//
-// Amazon Web Services Systems Manager is a collection of capabilities to help
-// you manage your applications and infrastructure running in the Amazon Web
-// Services Cloud;. Systems Manager simplifies application and resource management,
-// shortens the time to detect and resolve operational problems, and helps you
-// manage your Amazon Web Services resources securely at scale.
+// Amazon Web Services Systems Manager is the operations hub for your Amazon
+// Web Services applications and resources and a secure end-to-end management
+// solution for hybrid cloud environments that enables safe and secure operations
+// at scale.
//
// This reference is intended to be used with the Amazon Web Services Systems
// Manager User Guide (https://docs.aws.amazon.com/systems-manager/latest/userguide/).
-//
-// To get started, verify prerequisites. For more information, see Setting up
-// Amazon Web Services Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html).
+// To get started, see Setting up Amazon Web Services Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html).
//
// Related resources
//
-// - For information about how to use a Query API, see Making API requests
-// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html).
+// - For information about each of the capabilities that comprise Systems
+// Manager, see Systems Manager capabilities (https://docs.aws.amazon.com/systems-manager/latest/userguide/what-is-systems-manager.html#systems-manager-capabilities)
+// in the Amazon Web Services Systems Manager User Guide.
//
-// - For information about other API operations you can perform on EC2 instances,
-// see the Amazon EC2 API Reference (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/).
+// - For details about predefined runbooks for Automation, a capability of
+// Amazon Web Services Systems Manager, see the Systems Manager Automation
+// runbook reference (https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-runbook-reference.html) .
//
// - For information about AppConfig, a capability of Systems Manager, see
// the AppConfig User Guide (https://docs.aws.amazon.com/appconfig/latest/userguide/)
-// and the AppConfig API Reference (https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/).
+// and the AppConfig API Reference (https://docs.aws.amazon.com/appconfig/2019-10-09/APIReference/) .
//
// - For information about Incident Manager, a capability of Systems Manager,
-// see the Incident Manager User Guide (https://docs.aws.amazon.com/incident-manager/latest/userguide/)
-// and the Incident Manager API Reference (https://docs.aws.amazon.com/incident-manager/latest/APIReference/).
+// see the Systems Manager Incident Manager User Guide (https://docs.aws.amazon.com/incident-manager/latest/userguide/)
+// and the Systems Manager Incident Manager API Reference (https://docs.aws.amazon.com/incident-manager/latest/APIReference/) .
//
// See https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06 for more information on this service.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
index 7ddc0cb3b..c574051f4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go
@@ -122,10 +122,15 @@ const (
// ErrCodeDocumentPermissionLimit for service response error code
// "DocumentPermissionLimit".
//
- // The document can't be shared with more Amazon Web Services user accounts.
- // You can share a document with a maximum of 20 accounts. You can publicly
- // share up to five documents. If you need to increase this limit, contact Amazon
- // Web Services Support.
+ // The document can't be shared with more Amazon Web Services accounts. You
+ // can specify a maximum of 20 accounts per API operation to share a private
+ // document.
+ //
+ // By default, you can share a private document with a maximum of 1,000 accounts
+ // and publicly share up to five documents.
+ //
+ // If you need to increase the quota for privately or publicly shared Systems
+ // Manager documents, contact Amazon Web Services Support.
ErrCodeDocumentPermissionLimit = "DocumentPermissionLimit"
// ErrCodeDocumentVersionLimitExceeded for service response error code
@@ -362,14 +367,15 @@ const (
//
// * You don't have permission to access the managed node.
//
- // * Amazon Web Services Systems Manager Agent(SSM Agent) isn't running.
+ // * Amazon Web Services Systems Manager Agent (SSM Agent) isn't running.
// Verify that SSM Agent is running.
//
// * SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM
// Agent.
//
- // * The managed node isn't in valid state. Valid states are: Running, Pending,
- // Stopped, and Stopping. Invalid states are: Shutting-down and Terminated.
+ // * The managed node isn't in a valid state. Valid states are: Running,
+ // Pending, Stopped, and Stopping. Invalid states are: Shutting-down and
+ // Terminated.
ErrCodeInvalidInstanceId = "InvalidInstanceId"
// ErrCodeInvalidInstanceInformationFilterValue for service response error code
@@ -378,6 +384,12 @@ const (
// The specified filter value isn't valid.
ErrCodeInvalidInstanceInformationFilterValue = "InvalidInstanceInformationFilterValue"
+ // ErrCodeInvalidInstancePropertyFilterValue for service response error code
+ // "InvalidInstancePropertyFilterValue".
+ //
+ // The specified filter value isn't valid.
+ ErrCodeInvalidInstancePropertyFilterValue = "InvalidInstancePropertyFilterValue"
+
// ErrCodeInvalidInventoryGroupException for service response error code
// "InvalidInventoryGroupException".
//
@@ -501,7 +513,8 @@ const (
// The role name can't contain invalid characters. Also verify that you specified
// an IAM role for notifications that includes the required trust policy. For
// information about configuring the IAM role for Run Command notifications,
- // see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html)
+ // see Monitoring Systems Manager status changes using Amazon SNS notifications
+ // (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html)
// in the Amazon Web Services Systems Manager User Guide.
ErrCodeInvalidRole = "InvalidRole"
@@ -514,7 +527,7 @@ const (
// ErrCodeInvalidTag for service response error code
// "InvalidTag".
//
- // The specified tag key or value is not valid.
+ // The specified tag key or value isn't valid.
ErrCodeInvalidTag = "InvalidTag"
// ErrCodeInvalidTarget for service response error code
@@ -561,18 +574,39 @@ const (
// The inventory item size has exceeded the size limit.
ErrCodeItemSizeLimitExceededException = "ItemSizeLimitExceededException"
+ // ErrCodeMalformedResourcePolicyDocumentException for service response error code
+ // "MalformedResourcePolicyDocumentException".
+ //
+ // The specified policy document is malformed or invalid, or excessive PutResourcePolicy
+ // or DeleteResourcePolicy calls have been made.
+ ErrCodeMalformedResourcePolicyDocumentException = "MalformedResourcePolicyDocumentException"
+
// ErrCodeMaxDocumentSizeExceeded for service response error code
// "MaxDocumentSizeExceeded".
//
// The size limit of a document is 64 KB.
ErrCodeMaxDocumentSizeExceeded = "MaxDocumentSizeExceeded"
+ // ErrCodeOpsItemAccessDeniedException for service response error code
+ // "OpsItemAccessDeniedException".
+ //
+ // You don't have permission to view OpsItems in the specified account. Verify
+ // that your account is configured either as a Systems Manager delegated administrator
+ // or that you are logged into the Organizations management account.
+ ErrCodeOpsItemAccessDeniedException = "OpsItemAccessDeniedException"
+
// ErrCodeOpsItemAlreadyExistsException for service response error code
// "OpsItemAlreadyExistsException".
//
// The OpsItem already exists.
ErrCodeOpsItemAlreadyExistsException = "OpsItemAlreadyExistsException"
+ // ErrCodeOpsItemConflictException for service response error code
+ // "OpsItemConflictException".
+ //
+ // The specified OpsItem is in the process of being deleted.
+ ErrCodeOpsItemConflictException = "OpsItemConflictException"
+
// ErrCodeOpsItemInvalidParameterException for service response error code
// "OpsItemInvalidParameterException".
//
@@ -583,8 +617,7 @@ const (
// ErrCodeOpsItemLimitExceededException for service response error code
// "OpsItemLimitExceededException".
//
- // The request caused OpsItems to exceed one or more quotas. For information
- // about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits).
+ // The request caused OpsItems to exceed one or more quotas.
ErrCodeOpsItemLimitExceededException = "OpsItemLimitExceededException"
// ErrCodeOpsItemNotFoundException for service response error code
@@ -764,6 +797,41 @@ const (
// in the Amazon Web Services General Reference.
ErrCodeResourceLimitExceededException = "ResourceLimitExceededException"
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The specified parameter to be shared could not be found.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeResourcePolicyConflictException for service response error code
+ // "ResourcePolicyConflictException".
+ //
+ // The hash provided in the call doesn't match the stored hash. This exception
+ // is thrown when trying to update an obsolete policy version or when multiple
+ // requests to update a policy are sent.
+ ErrCodeResourcePolicyConflictException = "ResourcePolicyConflictException"
+
+ // ErrCodeResourcePolicyInvalidParameterException for service response error code
+ // "ResourcePolicyInvalidParameterException".
+ //
+ // One or more parameters specified for the call aren't valid. Verify the parameters
+ // and their values and try again.
+ ErrCodeResourcePolicyInvalidParameterException = "ResourcePolicyInvalidParameterException"
+
+ // ErrCodeResourcePolicyLimitExceededException for service response error code
+ // "ResourcePolicyLimitExceededException".
+ //
+ // The PutResourcePolicy API action enforces two limits. A policy can't be greater
+ // than 1024 bytes in size. And only one policy can be attached to OpsItemGroup.
+ // Verify these limits and try again.
+ ErrCodeResourcePolicyLimitExceededException = "ResourcePolicyLimitExceededException"
+
+ // ErrCodeResourcePolicyNotFoundException for service response error code
+ // "ResourcePolicyNotFoundException".
+ //
+ // No policies with the specified policy ID and hash could be found.
+ ErrCodeResourcePolicyNotFoundException = "ResourcePolicyNotFoundException"
+
// ErrCodeServiceSettingNotFound for service response error code
// "ServiceSettingNotFound".
//
@@ -833,7 +901,7 @@ const (
// Patching for applications released by Microsoft is only available on EC2
// instances and advanced instances. To patch applications released by Microsoft
// on on-premises servers and VMs, you must enable advanced instances. For more
- // information, see Enabling the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
+ // information, see Turning on the advanced-instances tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html)
// in the Amazon Web Services Systems Manager User Guide.
ErrCodeUnsupportedFeatureRequiredException = "UnsupportedFeatureRequiredException"
@@ -870,8 +938,8 @@ const (
// "UnsupportedPlatformType".
//
// The document doesn't support the platform type of the given managed node
- // ID(s). For example, you sent an document for a Windows managed node to a
- // Linux node.
+ // IDs. For example, you sent an document for a Windows managed node to a Linux
+ // node.
ErrCodeUnsupportedPlatformType = "UnsupportedPlatformType"
)
@@ -929,6 +997,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"InvalidFilterValue": newErrorInvalidFilterValue,
"InvalidInstanceId": newErrorInvalidInstanceId,
"InvalidInstanceInformationFilterValue": newErrorInvalidInstanceInformationFilterValue,
+ "InvalidInstancePropertyFilterValue": newErrorInvalidInstancePropertyFilterValue,
"InvalidInventoryGroupException": newErrorInvalidInventoryGroupException,
"InvalidInventoryItemContextException": newErrorInvalidInventoryItemContextException,
"InvalidInventoryRequestException": newErrorInvalidInventoryRequestException,
@@ -957,8 +1026,11 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"InvocationDoesNotExist": newErrorInvocationDoesNotExist,
"ItemContentMismatchException": newErrorItemContentMismatchException,
"ItemSizeLimitExceededException": newErrorItemSizeLimitExceededException,
+ "MalformedResourcePolicyDocumentException": newErrorMalformedResourcePolicyDocumentException,
"MaxDocumentSizeExceeded": newErrorMaxDocumentSizeExceeded,
+ "OpsItemAccessDeniedException": newErrorOpsItemAccessDeniedException,
"OpsItemAlreadyExistsException": newErrorOpsItemAlreadyExistsException,
+ "OpsItemConflictException": newErrorOpsItemConflictException,
"OpsItemInvalidParameterException": newErrorOpsItemInvalidParameterException,
"OpsItemLimitExceededException": newErrorOpsItemLimitExceededException,
"OpsItemNotFoundException": newErrorOpsItemNotFoundException,
@@ -985,6 +1057,11 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"ResourceDataSyncNotFoundException": newErrorResourceDataSyncNotFoundException,
"ResourceInUseException": newErrorResourceInUseException,
"ResourceLimitExceededException": newErrorResourceLimitExceededException,
+ "ResourceNotFoundException": newErrorResourceNotFoundException,
+ "ResourcePolicyConflictException": newErrorResourcePolicyConflictException,
+ "ResourcePolicyInvalidParameterException": newErrorResourcePolicyInvalidParameterException,
+ "ResourcePolicyLimitExceededException": newErrorResourcePolicyLimitExceededException,
+ "ResourcePolicyNotFoundException": newErrorResourcePolicyNotFoundException,
"ServiceSettingNotFound": newErrorServiceSettingNotFound,
"StatusUnchanged": newErrorStatusUnchanged,
"SubTypeCountLimitExceededException": newErrorSubTypeCountLimitExceededException,
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
new file mode 100644
index 000000000..827bd5194
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
@@ -0,0 +1,2406 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const opCreateToken = "CreateToken"
+
+// CreateTokenRequest generates a "aws/request.Request" representing the
+// client's request for the CreateToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateToken for more information on using the CreateToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateTokenRequest method.
+// req, resp := client.CreateTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken
+func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) {
+ op := &request.Operation{
+ Name: opCreateToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/token",
+ }
+
+ if input == nil {
+ input = &CreateTokenInput{}
+ }
+
+ output = &CreateTokenOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// CreateToken API operation for AWS SSO OIDC.
+//
+// Creates and returns access and refresh tokens for clients that are authenticated
+// using client secrets. The access token can be used to fetch short-term credentials
+// for the assigned AWS accounts or to access application APIs using bearer
+// authentication.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation CreateToken for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidClientException
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+//
+// - InvalidGrantException
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+//
+// - UnauthorizedClientException
+// Indicates that the client is not currently authorized to make the request.
+// This can happen when a clientId is not issued for a public client.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
+// - InvalidScopeException
+// Indicates that the scope provided in the request is invalid.
+//
+// - AuthorizationPendingException
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+//
+// - SlowDownException
+// Indicates that the client is making the request too frequently and is more
+// than the service can handle.
+//
+// - AccessDeniedException
+// You do not have sufficient access to perform this action.
+//
+// - ExpiredTokenException
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken
+func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) {
+ req, out := c.CreateTokenRequest(input)
+ return out, req.Send()
+}
+
+// CreateTokenWithContext is the same as CreateToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) {
+ req, out := c.CreateTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateTokenWithIAM = "CreateTokenWithIAM"
+
+// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the
+// client's request for the CreateTokenWithIAM operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the CreateTokenWithIAMRequest method.
+// req, resp := client.CreateTokenWithIAMRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) {
+ op := &request.Operation{
+ Name: opCreateTokenWithIAM,
+ HTTPMethod: "POST",
+ HTTPPath: "/token?aws_iam=t",
+ }
+
+ if input == nil {
+ input = &CreateTokenWithIAMInput{}
+ }
+
+ output = &CreateTokenWithIAMOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateTokenWithIAM API operation for AWS SSO OIDC.
+//
+// Creates and returns access and refresh tokens for clients and applications
+// that are authenticated using IAM entities. The access token can be used to
+// fetch short-term credentials for the assigned Amazon Web Services accounts
+// or to access application APIs using bearer authentication.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation CreateTokenWithIAM for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidClientException
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+//
+// - InvalidGrantException
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+//
+// - UnauthorizedClientException
+// Indicates that the client is not currently authorized to make the request.
+// This can happen when a clientId is not issued for a public client.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
+// - InvalidScopeException
+// Indicates that the scope provided in the request is invalid.
+//
+// - AuthorizationPendingException
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+//
+// - SlowDownException
+// Indicates that the client is making the request too frequently and is more
+// than the service can handle.
+//
+// - AccessDeniedException
+// You do not have sufficient access to perform this action.
+//
+// - ExpiredTokenException
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// - InvalidRequestRegionException
+// Indicates that a token provided as input to the request was issued by and
+// is only usable by calling IAM Identity Center endpoints in another region.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) {
+ req, out := c.CreateTokenWithIAMRequest(input)
+ return out, req.Send()
+}
+
+// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateTokenWithIAM for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) {
+ req, out := c.CreateTokenWithIAMRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opRegisterClient = "RegisterClient"
+
+// RegisterClientRequest generates a "aws/request.Request" representing the
+// client's request for the RegisterClient operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RegisterClient for more information on using the RegisterClient
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the RegisterClientRequest method.
+// req, resp := client.RegisterClientRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
+func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) {
+ op := &request.Operation{
+ Name: opRegisterClient,
+ HTTPMethod: "POST",
+ HTTPPath: "/client/register",
+ }
+
+ if input == nil {
+ input = &RegisterClientInput{}
+ }
+
+ output = &RegisterClientOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// RegisterClient API operation for AWS SSO OIDC.
+//
+// Registers a client with IAM Identity Center. This allows clients to initiate
+// device authorization. The output should be persisted for reuse through many
+// authentication requests.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation RegisterClient for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidScopeException
+// Indicates that the scope provided in the request is invalid.
+//
+// - InvalidClientMetadataException
+// Indicates that the client information sent in the request during registration
+// is invalid.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// - InvalidRedirectUriException
+// Indicates that one or more redirect URI in the request is not supported for
+// this operation.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
+func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) {
+ req, out := c.RegisterClientRequest(input)
+ return out, req.Send()
+}
+
+// RegisterClientWithContext is the same as RegisterClient with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RegisterClient for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) {
+ req, out := c.RegisterClientRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opStartDeviceAuthorization = "StartDeviceAuthorization"
+
+// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the
+// client's request for the StartDeviceAuthorization operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the StartDeviceAuthorizationRequest method.
+// req, resp := client.StartDeviceAuthorizationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization
+func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) {
+ op := &request.Operation{
+ Name: opStartDeviceAuthorization,
+ HTTPMethod: "POST",
+ HTTPPath: "/device_authorization",
+ }
+
+ if input == nil {
+ input = &StartDeviceAuthorizationInput{}
+ }
+
+ output = &StartDeviceAuthorizationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// StartDeviceAuthorization API operation for AWS SSO OIDC.
+//
+// Initiates device authorization by requesting a pair of verification codes
+// from the authorization service.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation StartDeviceAuthorization for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidClientException
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+//
+// - UnauthorizedClientException
+// Indicates that the client is not currently authorized to make the request.
+// This can happen when a clientId is not issued for a public client.
+//
+// - SlowDownException
+// Indicates that the client is making the request too frequently and is more
+// than the service can handle.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization
+func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) {
+ req, out := c.StartDeviceAuthorizationRequest(input)
+ return out, req.Send()
+}
+
+// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StartDeviceAuthorization for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) {
+ req, out := c.StartDeviceAuthorizationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// You do not have sufficient access to perform this action.
+type AccessDeniedException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be access_denied.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AccessDeniedException) GoString() string {
+ return s.String()
+}
+
+func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
+ return &AccessDeniedException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AccessDeniedException) Code() string {
+ return "AccessDeniedException"
+}
+
+// Message returns the exception's message.
+func (s *AccessDeniedException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AccessDeniedException) OrigErr() error {
+ return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AccessDeniedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+type AuthorizationPendingException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be authorization_pending.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AuthorizationPendingException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AuthorizationPendingException) GoString() string {
+ return s.String()
+}
+
+func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error {
+ return &AuthorizationPendingException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *AuthorizationPendingException) Code() string {
+ return "AuthorizationPendingException"
+}
+
+// Message returns the exception's message.
+func (s *AuthorizationPendingException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *AuthorizationPendingException) OrigErr() error {
+ return nil
+}
+
+func (s *AuthorizationPendingException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *AuthorizationPendingException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AuthorizationPendingException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type CreateTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier string for the client or application. This value comes
+ // from the result of the RegisterClient API.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // A secret string generated for the client. This value should come from the
+ // persisted result of the RegisterClient API.
+ //
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ //
+ // ClientSecret is a required field
+ ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. The
+ // short-term code is used to identify this authorization request. This grant
+ // type is currently unsupported for the CreateToken API.
+ Code *string `locationName:"code" type:"string"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Device Code grant type. This short-term
+ // code is used to identify this authorization request. This comes from the
+ // result of the StartDeviceAuthorization API.
+ DeviceCode *string `locationName:"deviceCode" type:"string"`
+
+ // Supports the following OAuth grant types: Device Code and Refresh Token.
+ // Specify either of the following values, depending on the grant type that
+ // you want:
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // For information about how to obtain the device code, see the StartDeviceAuthorization
+ // topic.
+ //
+ // GrantType is a required field
+ GrantType *string `locationName:"grantType" type:"string" required:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered
+ // to receive the authorization code.
+ RedirectUri *string `locationName:"redirectUri" type:"string"`
+
+ // Used only when calling this API for the Refresh Token grant type. This token
+ // is used to refresh short-term tokens, such as the access token, that might
+ // expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // The list of scopes for which authorization is requested. The access token
+ // that is issued is limited to the scopes that are granted. If this value is
+ // not specified, IAM Identity Center authorizes all scopes that are configured
+ // for the client during the call to RegisterClient.
+ Scope []*string `locationName:"scope" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"}
+ if s.ClientId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientId"))
+ }
+ if s.ClientSecret == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientSecret"))
+ }
+ if s.GrantType == nil {
+ invalidParams.Add(request.NewErrParamRequired("GrantType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput {
+ s.ClientId = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetCode sets the Code field's value.
+func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput {
+ s.Code = &v
+ return s
+}
+
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput {
+ s.CodeVerifier = &v
+ return s
+}
+
+// SetDeviceCode sets the DeviceCode field's value.
+func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput {
+ s.DeviceCode = &v
+ return s
+}
+
+// SetGrantType sets the GrantType field's value.
+func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput {
+ s.GrantType = &v
+ return s
+}
+
+// SetRedirectUri sets the RedirectUri field's value.
+func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput {
+ s.RedirectUri = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput {
+ s.Scope = v
+ return s
+}
+
+type CreateTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
+
+ // Indicates the time in seconds when an access token will expire.
+ ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
+
+ // The idToken is not implemented or supported. For more information about the
+ // features and limitations of the current IAM Identity Center OIDC implementation,
+ // see Considerations for Using this Guide in the IAM Identity Center OIDC API
+ // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // A JSON Web Token (JWT) that identifies who is associated with the issued
+ // access token.
+ //
+ // IdToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // Used to notify the client that the returned token is an access token. The
+ // supported token type is Bearer.
+ TokenType *string `locationName:"tokenType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetExpiresIn sets the ExpiresIn field's value.
+func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput {
+ s.ExpiresIn = &v
+ return s
+}
+
+// SetIdToken sets the IdToken field's value.
+func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput {
+ s.IdToken = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetTokenType sets the TokenType field's value.
+func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput {
+ s.TokenType = &v
+ return s
+}
+
+type CreateTokenWithIAMInput struct {
+ _ struct{} `type:"structure"`
+
+ // Used only when calling this API for the JWT Bearer grant type. This value
+ // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize
+ // a trusted token issuer, configure the JWT Bearer GrantOptions for the application.
+ //
+ // Assertion is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ Assertion *string `locationName:"assertion" type:"string" sensitive:"true"`
+
+ // The unique identifier string for the client or application. This value is
+ // an application ARN that has OAuth grants configured.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // short-term code is used to identify this authorization request. The code
+ // is obtained through a redirect from IAM Identity Center to a redirect URI
+ // persisted in the Authorization Code GrantOptions for the application.
+ Code *string `locationName:"code" type:"string"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
+ // Supports the following OAuth grant types: Authorization Code, Refresh Token,
+ // JWT Bearer, and Token Exchange. Specify one of the following values, depending
+ // on the grant type that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+ //
+ // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
+ //
+ // GrantType is a required field
+ GrantType *string `locationName:"grantType" type:"string" required:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered
+ // to receive the authorization code.
+ RedirectUri *string `locationName:"redirectUri" type:"string"`
+
+ // Used only when calling this API for the Refresh Token grant type. This token
+ // is used to refresh short-term tokens, such as the access token, that might
+ // expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that the requester can receive. The following
+ // values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ RequestedTokenType *string `locationName:"requestedTokenType" type:"string"`
+
+ // The list of scopes for which authorization is requested. The access token
+ // that is issued is limited to the scopes that are granted. If the value is
+ // not specified, IAM Identity Center authorizes all scopes configured for the
+ // application, including the following default scopes: openid, aws, sts:identity_context.
+ Scope []*string `locationName:"scope" type:"list"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the subject of the exchange. The value of the subject token must
+ // be an access token issued by IAM Identity Center to a different client or
+ // application. The access token must have authorized scopes that indicate the
+ // requested application as a target audience.
+ //
+ // SubjectToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that is passed as the subject of the exchange.
+ // The following value is supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ SubjectTokenType *string `locationName:"subjectTokenType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTokenWithIAMInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"}
+ if s.ClientId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientId"))
+ }
+ if s.GrantType == nil {
+ invalidParams.Add(request.NewErrParamRequired("GrantType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssertion sets the Assertion field's value.
+func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput {
+ s.Assertion = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput {
+ s.ClientId = &v
+ return s
+}
+
+// SetCode sets the Code field's value.
+func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput {
+ s.Code = &v
+ return s
+}
+
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput {
+ s.CodeVerifier = &v
+ return s
+}
+
+// SetGrantType sets the GrantType field's value.
+func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput {
+ s.GrantType = &v
+ return s
+}
+
+// SetRedirectUri sets the RedirectUri field's value.
+func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput {
+ s.RedirectUri = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetRequestedTokenType sets the RequestedTokenType field's value.
+func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput {
+ s.RequestedTokenType = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput {
+ s.Scope = v
+ return s
+}
+
+// SetSubjectToken sets the SubjectToken field's value.
+func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput {
+ s.SubjectToken = &v
+ return s
+}
+
+// SetSubjectTokenType sets the SubjectTokenType field's value.
+func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput {
+ s.SubjectTokenType = &v
+ return s
+}
+
+type CreateTokenWithIAMOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
+
+ // Indicates the time in seconds when an access token will expire.
+ ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
+
+ // A JSON Web Token (JWT) that identifies the user associated with the issued
+ // access token.
+ //
+ // IdToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
+
+ // Indicates the type of tokens that are issued by IAM Identity Center. The
+ // following values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ IssuedTokenType *string `locationName:"issuedTokenType" type:"string"`
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // The list of scopes for which authorization is granted. The access token that
+ // is issued is limited to the scopes that are granted.
+ Scope []*string `locationName:"scope" type:"list"`
+
+ // Used to notify the requester that the returned token is an access token.
+ // The supported token type is Bearer.
+ TokenType *string `locationName:"tokenType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetExpiresIn sets the ExpiresIn field's value.
+func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput {
+ s.ExpiresIn = &v
+ return s
+}
+
+// SetIdToken sets the IdToken field's value.
+func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput {
+ s.IdToken = &v
+ return s
+}
+
+// SetIssuedTokenType sets the IssuedTokenType field's value.
+func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput {
+ s.IssuedTokenType = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput {
+ s.Scope = v
+ return s
+}
+
+// SetTokenType sets the TokenType field's value.
+func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput {
+ s.TokenType = &v
+ return s
+}
+
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+type ExpiredTokenException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be expired_token.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpiredTokenException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ExpiredTokenException) GoString() string {
+ return s.String()
+}
+
+func newErrorExpiredTokenException(v protocol.ResponseMetadata) error {
+ return &ExpiredTokenException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *ExpiredTokenException) Code() string {
+ return "ExpiredTokenException"
+}
+
+// Message returns the exception's message.
+func (s *ExpiredTokenException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ExpiredTokenException) OrigErr() error {
+ return nil
+}
+
+func (s *ExpiredTokenException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *ExpiredTokenException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *ExpiredTokenException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that an error from the service occurred while trying to process
+// a request.
+type InternalServerException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be server_error.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InternalServerException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InternalServerException) GoString() string {
+ return s.String()
+}
+
+func newErrorInternalServerException(v protocol.ResponseMetadata) error {
+ return &InternalServerException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InternalServerException) Code() string {
+ return "InternalServerException"
+}
+
+// Message returns the exception's message.
+func (s *InternalServerException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InternalServerException) OrigErr() error {
+ return nil
+}
+
+func (s *InternalServerException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InternalServerException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InternalServerException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+type InvalidClientException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_client.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidClientException(v protocol.ResponseMetadata) error {
+ return &InvalidClientException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidClientException) Code() string {
+ return "InvalidClientException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidClientException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidClientException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidClientException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidClientException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidClientException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the client information sent in the request during registration
+// is invalid.
+type InvalidClientMetadataException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_client_metadata.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientMetadataException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidClientMetadataException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error {
+ return &InvalidClientMetadataException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidClientMetadataException) Code() string {
+ return "InvalidClientMetadataException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidClientMetadataException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidClientMetadataException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidClientMetadataException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidClientMetadataException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidClientMetadataException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+type InvalidGrantException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_grant.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidGrantException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidGrantException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidGrantException(v protocol.ResponseMetadata) error {
+ return &InvalidGrantException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidGrantException) Code() string {
+ return "InvalidGrantException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidGrantException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidGrantException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidGrantException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidGrantException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidGrantException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that one or more redirect URI in the request is not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_redirect_uri.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error {
+ return &InvalidRedirectUriException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRedirectUriException) Code() string {
+ return "InvalidRedirectUriException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRedirectUriException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRedirectUriException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidRedirectUriException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRedirectUriException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_request.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestException) Code() string {
+ return "InvalidRequestException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidRequestException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRequestException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that a token provided as input to the request was issued by and
+// is only usable by calling IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Indicates the IAM Identity Center endpoint which the requester may call with
+ // this token.
+ Endpoint *string `locationName:"endpoint" type:"string"`
+
+ // Single error code. For this exception the value will be invalid_request.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ // Indicates the region which the requester may call with this token.
+ Region *string `locationName:"region" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestRegionException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestRegionException) Code() string {
+ return "InvalidRequestRegionException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestRegionException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestRegionException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestRegionException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidRequestRegionException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRequestRegionException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the scope provided in the request is invalid.
+type InvalidScopeException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_scope.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidScopeException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidScopeException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidScopeException(v protocol.ResponseMetadata) error {
+ return &InvalidScopeException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidScopeException) Code() string {
+ return "InvalidScopeException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidScopeException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidScopeException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidScopeException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *InvalidScopeException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidScopeException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type RegisterClientInput struct {
+ _ struct{} `type:"structure"`
+
+ // The friendly name of the client.
+ //
+ // ClientName is a required field
+ ClientName *string `locationName:"clientName" type:"string" required:"true"`
+
+ // The type of client. The service supports only public as a client type. Anything
+ // other than public will be rejected by the service.
+ //
+ // ClientType is a required field
+ ClientType *string `locationName:"clientType" type:"string" required:"true"`
+
+ // This IAM Identity Center application ARN is used to define administrator-managed
+ // configuration for public client access to resources. At authorization, the
+ // scopes, grants, and redirect URI available to this client will be restricted
+ // by this application resource.
+ EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"`
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list
+ // is used to restrict the token granting flows available to the client.
+ GrantTypes []*string `locationName:"grantTypes" type:"list"`
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string `locationName:"issuerUrl" type:"string"`
+
+ // The list of redirect URI that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent
+ // can be redirected back to.
+ RedirectUris []*string `locationName:"redirectUris" type:"list"`
+
+ // The list of scopes that are defined by the client. Upon authorization, this
+ // list is used to restrict permissions when granting an access token.
+ Scopes []*string `locationName:"scopes" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RegisterClientInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"}
+ if s.ClientName == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientName"))
+ }
+ if s.ClientType == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientName sets the ClientName field's value.
+func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput {
+ s.ClientName = &v
+ return s
+}
+
+// SetClientType sets the ClientType field's value.
+func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput {
+ s.ClientType = &v
+ return s
+}
+
+// SetEntitledApplicationArn sets the EntitledApplicationArn field's value.
+func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput {
+ s.EntitledApplicationArn = &v
+ return s
+}
+
+// SetGrantTypes sets the GrantTypes field's value.
+func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput {
+ s.GrantTypes = v
+ return s
+}
+
+// SetIssuerUrl sets the IssuerUrl field's value.
+func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput {
+ s.IssuerUrl = &v
+ return s
+}
+
+// SetRedirectUris sets the RedirectUris field's value.
+func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput {
+ s.RedirectUris = v
+ return s
+}
+
+// SetScopes sets the Scopes field's value.
+func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
+ s.Scopes = v
+ return s
+}
+
+type RegisterClientOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An endpoint that the client can use to request authorization.
+ AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"`
+
+ // The unique identifier string for each client. This client uses this identifier
+ // to get authenticated by the service in subsequent calls.
+ ClientId *string `locationName:"clientId" type:"string"`
+
+ // Indicates the time at which the clientId and clientSecret were issued.
+ ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"`
+
+ // A secret string generated for the client. The client will use this string
+ // to get authenticated by the service in subsequent calls.
+ //
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by RegisterClientOutput's
+ // String and GoString methods.
+ ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"`
+
+ // Indicates the time at which the clientId and clientSecret will become invalid.
+ ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"`
+
+ // An endpoint that the client can use to create tokens.
+ TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RegisterClientOutput) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value.
+func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput {
+ s.AuthorizationEndpoint = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput {
+ s.ClientId = &v
+ return s
+}
+
+// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value.
+func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput {
+ s.ClientIdIssuedAt = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value.
+func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput {
+ s.ClientSecretExpiresAt = &v
+ return s
+}
+
+// SetTokenEndpoint sets the TokenEndpoint field's value.
+func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput {
+ s.TokenEndpoint = &v
+ return s
+}
+
+// Indicates that the client is making the request too frequently and is more
+// than the service can handle.
+type SlowDownException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be slow_down.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SlowDownException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SlowDownException) GoString() string {
+ return s.String()
+}
+
+func newErrorSlowDownException(v protocol.ResponseMetadata) error {
+ return &SlowDownException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *SlowDownException) Code() string {
+ return "SlowDownException"
+}
+
+// Message returns the exception's message.
+func (s *SlowDownException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *SlowDownException) OrigErr() error {
+ return nil
+}
+
+func (s *SlowDownException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *SlowDownException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *SlowDownException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+type StartDeviceAuthorizationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier string for the client that is registered with IAM Identity
+ // Center. This value should come from the persisted result of the RegisterClient
+ // API operation.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // A secret string that is generated for the client. This value should come
+ // from the persisted result of the RegisterClient API operation.
+ //
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's
+ // String and GoString methods.
+ //
+ // ClientSecret is a required field
+ ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
+
+ // The URL for the Amazon Web Services access portal. For more information,
+ // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
+ // in the IAM Identity Center User Guide.
+ //
+ // StartUrl is a required field
+ StartUrl *string `locationName:"startUrl" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartDeviceAuthorizationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartDeviceAuthorizationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartDeviceAuthorizationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"}
+ if s.ClientId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientId"))
+ }
+ if s.ClientSecret == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientSecret"))
+ }
+ if s.StartUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("StartUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput {
+ s.ClientId = &v
+ return s
+}
+
+// SetClientSecret sets the ClientSecret field's value.
+func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput {
+ s.ClientSecret = &v
+ return s
+}
+
+// SetStartUrl sets the StartUrl field's value.
+func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput {
+ s.StartUrl = &v
+ return s
+}
+
+type StartDeviceAuthorizationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The short-lived code that is used by the device when polling for a session
+ // token.
+ DeviceCode *string `locationName:"deviceCode" type:"string"`
+
+ // Indicates the number of seconds in which the verification code will become
+ // invalid.
+ ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
+
+ // Indicates the number of seconds the client must wait between attempts when
+ // polling for a session.
+ Interval *int64 `locationName:"interval" type:"integer"`
+
+ // A one-time user verification code. This is needed to authorize an in-use
+ // device.
+ UserCode *string `locationName:"userCode" type:"string"`
+
+ // The URI of the verification page that takes the userCode to authorize the
+ // device.
+ VerificationUri *string `locationName:"verificationUri" type:"string"`
+
+ // An alternate URL that the client can use to automatically launch a browser.
+ // This process skips the manual step in which the user visits the verification
+ // page and enters their code.
+ VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartDeviceAuthorizationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StartDeviceAuthorizationOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeviceCode sets the DeviceCode field's value.
+func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput {
+ s.DeviceCode = &v
+ return s
+}
+
+// SetExpiresIn sets the ExpiresIn field's value.
+func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput {
+ s.ExpiresIn = &v
+ return s
+}
+
+// SetInterval sets the Interval field's value.
+func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput {
+ s.Interval = &v
+ return s
+}
+
+// SetUserCode sets the UserCode field's value.
+func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput {
+ s.UserCode = &v
+ return s
+}
+
+// SetVerificationUri sets the VerificationUri field's value.
+func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput {
+ s.VerificationUri = &v
+ return s
+}
+
+// SetVerificationUriComplete sets the VerificationUriComplete field's value.
+func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput {
+ s.VerificationUriComplete = &v
+ return s
+}
+
+// Indicates that the client is not currently authorized to make the request.
+// This can happen when a clientId is not issued for a public client.
+type UnauthorizedClientException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be unauthorized_client.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnauthorizedClientException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnauthorizedClientException) GoString() string {
+ return s.String()
+}
+
+func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error {
+ return &UnauthorizedClientException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedClientException) Code() string {
+ return "UnauthorizedClientException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedClientException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedClientException) OrigErr() error {
+ return nil
+}
+
+func (s *UnauthorizedClientException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnauthorizedClientException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedClientException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be unsupported_grant_type.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) GoString() string {
+ return s.String()
+}
+
+func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error {
+ return &UnsupportedGrantTypeException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *UnsupportedGrantTypeException) Code() string {
+ return "UnsupportedGrantTypeException"
+}
+
+// Message returns the exception's message.
+func (s *UnsupportedGrantTypeException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnsupportedGrantTypeException) OrigErr() error {
+ return nil
+}
+
+func (s *UnsupportedGrantTypeException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnsupportedGrantTypeException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnsupportedGrantTypeException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
new file mode 100644
index 000000000..083568c61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -0,0 +1,67 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssooidc provides the client and types for making API
+// requests to AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access
+// token upon successful authentication and authorization with IAM Identity
+// Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+// - The IAM Identity Center OIDC service currently implements only the portions
+// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628
+// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single
+// sign-on authentication with the CLI.
+//
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access
+// the OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
+// V2) with support for OIDC token refresh and configurable IAM Identity
+// Center session durations. For more information, see Configure Amazon Web
+// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
+//
+// - The access tokens provided by this service grant access to all Amazon
+// Web Services account entitlements assigned to an IAM Identity Center user,
+// not just a particular application.
+//
+// - The documentation in this guide does not describe the mechanism to convert
+// the access token into Amazon Web Services Auth (“sigv4”) credentials
+// for use with IAM-protected Amazon Web Services service endpoints. For
+// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
+// in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see What is IAM Identity
+// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the IAM Identity Center User Guide.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service.
+//
+// See ssooidc package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/
+//
+// # Using the Client
+//
+// To contact AWS SSO OIDC with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS SSO OIDC client SSOOIDC for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New
+package ssooidc
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
new file mode 100644
index 000000000..cadf4584d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
@@ -0,0 +1,123 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+ // ErrCodeAccessDeniedException for service response error code
+ // "AccessDeniedException".
+ //
+ // You do not have sufficient access to perform this action.
+ ErrCodeAccessDeniedException = "AccessDeniedException"
+
+ // ErrCodeAuthorizationPendingException for service response error code
+ // "AuthorizationPendingException".
+ //
+ // Indicates that a request to authorize a client with an access user session
+ // token is pending.
+ ErrCodeAuthorizationPendingException = "AuthorizationPendingException"
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // Indicates that the token issued by the service is expired and is no longer
+ // valid.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeInternalServerException for service response error code
+ // "InternalServerException".
+ //
+ // Indicates that an error from the service occurred while trying to process
+ // a request.
+ ErrCodeInternalServerException = "InternalServerException"
+
+ // ErrCodeInvalidClientException for service response error code
+ // "InvalidClientException".
+ //
+ // Indicates that the clientId or clientSecret in the request is invalid. For
+ // example, this can occur when a client sends an incorrect clientId or an expired
+ // clientSecret.
+ ErrCodeInvalidClientException = "InvalidClientException"
+
+ // ErrCodeInvalidClientMetadataException for service response error code
+ // "InvalidClientMetadataException".
+ //
+ // Indicates that the client information sent in the request during registration
+ // is invalid.
+ ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException"
+
+ // ErrCodeInvalidGrantException for service response error code
+ // "InvalidGrantException".
+ //
+ // Indicates that a request contains an invalid grant. This can occur if a client
+ // makes a CreateToken request with an invalid grant type.
+ ErrCodeInvalidGrantException = "InvalidGrantException"
+
+ // ErrCodeInvalidRedirectUriException for service response error code
+ // "InvalidRedirectUriException".
+ //
+ // Indicates that one or more redirect URI in the request is not supported for
+ // this operation.
+ ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
+
+ // ErrCodeInvalidRequestException for service response error code
+ // "InvalidRequestException".
+ //
+ // Indicates that something is wrong with the input to the request. For example,
+ // a required parameter might be missing or out of range.
+ ErrCodeInvalidRequestException = "InvalidRequestException"
+
+ // ErrCodeInvalidRequestRegionException for service response error code
+ // "InvalidRequestRegionException".
+ //
+ // Indicates that a token provided as input to the request was issued by and
+ // is only usable by calling IAM Identity Center endpoints in another region.
+ ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
+
+ // ErrCodeInvalidScopeException for service response error code
+ // "InvalidScopeException".
+ //
+ // Indicates that the scope provided in the request is invalid.
+ ErrCodeInvalidScopeException = "InvalidScopeException"
+
+ // ErrCodeSlowDownException for service response error code
+ // "SlowDownException".
+ //
+ // Indicates that the client is making the request too frequently and is more
+ // than the service can handle.
+ ErrCodeSlowDownException = "SlowDownException"
+
+ // ErrCodeUnauthorizedClientException for service response error code
+ // "UnauthorizedClientException".
+ //
+ // Indicates that the client is not currently authorized to make the request.
+ // This can happen when a clientId is not issued for a public client.
+ ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
+
+ // ErrCodeUnsupportedGrantTypeException for service response error code
+ // "UnsupportedGrantTypeException".
+ //
+ // Indicates that the grant type in the request is not supported by the service.
+ ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+ "AccessDeniedException": newErrorAccessDeniedException,
+ "AuthorizationPendingException": newErrorAuthorizationPendingException,
+ "ExpiredTokenException": newErrorExpiredTokenException,
+ "InternalServerException": newErrorInternalServerException,
+ "InvalidClientException": newErrorInvalidClientException,
+ "InvalidClientMetadataException": newErrorInvalidClientMetadataException,
+ "InvalidGrantException": newErrorInvalidGrantException,
+ "InvalidRedirectUriException": newErrorInvalidRedirectUriException,
+ "InvalidRequestException": newErrorInvalidRequestException,
+ "InvalidRequestRegionException": newErrorInvalidRequestRegionException,
+ "InvalidScopeException": newErrorInvalidScopeException,
+ "SlowDownException": newErrorSlowDownException,
+ "UnauthorizedClientException": newErrorUnauthorizedClientException,
+ "UnsupportedGrantTypeException": newErrorUnsupportedGrantTypeException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
new file mode 100644
index 000000000..782bae369
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// modify mutate any of the struct's properties though.
+type SSOOIDC struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "SSO OIDC" // Name of service.
+ EndpointsID = "oidc" // ID to lookup a service endpoint with.
+ ServiceID = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSOOIDC client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+// mySession := session.Must(session.NewSession())
+//
+// // Create a SSOOIDC client from just a session.
+// svc := ssooidc.New(mySession)
+//
+// // Create a SSOOIDC client with additional configuration
+// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "sso-oauth"
+ }
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC {
+ svc := &SSOOIDC{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2019-06-10",
+ ResolvedRegion: resolvedRegion,
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(
+ protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+ )
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SSOOIDC operation and runs any
+// custom request initialization.
+func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 2b7e675ab..2c395f5f6 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// AssumeRole API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources that you might not normally have access to.
-// These temporary credentials consist of an access key ID, a secret access
-// key, and a security token. Typically, you use AssumeRole within your account
-// or for cross-account access. For a comparison of AssumeRole with other API
-// operations that produce temporary credentials, see Requesting Temporary Security
-// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// Amazon Web Services resources. These temporary credentials consist of an
+// access key ID, a secret access key, and a security token. Typically, you
+// use AssumeRole within your account or for cross-account access. For a comparison
+// of AssumeRole with other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
@@ -74,21 +73,21 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
-// When you create a role, you create two policies: A role trust policy that
-// specifies who can assume the role and a permissions policy that specifies
-// what can be done with the role. You specify the trusted principal who is
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies
+// what can be done with the role. You specify the trusted principal that is
// allowed to assume the role in the role trust policy.
//
// To assume a role from a different account, your Amazon Web Services account
@@ -97,9 +96,9 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// are allowed to delegate that access to users in the account.
//
// A user who wants to access a role in a different account must also have permissions
-// that are delegated from the user account administrator. The administrator
-// must attach a policy that allows the user to call AssumeRole for the ARN
-// of the role in the other account.
+// that are delegated from the account administrator. The administrator must
+// attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
//
// To allow a user to assume a role in the same account, you can do either of
// the following:
@@ -307,16 +306,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
@@ -343,11 +342,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
-// The PackedPolicySize response element indicates by percentage how close the
-// policies and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has
+// a separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the
+// upper size limit.
//
// You can pass a session tag with the same key as a tag that is attached to
// the role. When you do, session tags override the role's tags with the same
@@ -517,10 +517,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// a user. You can also supply the user with a consistent identity throughout
// the lifetime of an application.
//
-// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
-// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito
-// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
-// in the Amazon Web Services SDK for iOS Developer Guide.
+// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
+// in Amazon Cognito Developer Guide.
//
// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
// Services security credentials. Therefore, you can distribute an application
@@ -563,16 +561,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
//
// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters. Passing policies
-// to this operation returns new temporary credentials. The resulting session's
-// permissions are the intersection of the role's identity-based policy and
-// the session policies. You can use the role's temporary credentials in subsequent
-// Amazon Web Services API calls to access resources in the account that owns
-// the role. You cannot use session policies to grant more permissions than
-// those allowed by the identity-based policy of the role that is being assumed.
-// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
+// Passing policies to this operation returns new temporary credentials. The
+// resulting session's permissions are the intersection of the role's identity-based
+// policy and the session policies. You can use the role's temporary credentials
+// in subsequent Amazon Web Services API calls to access resources in the account
+// that owns the role. You cannot use session policies to grant more permissions
+// than those allowed by the identity-based policy of the role that is being
+// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
// # Tags
@@ -588,11 +586,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
-// An Amazon Web Services conversion compresses the passed session policies
-// and session tags into a packed binary format that has a separate limit. Your
-// request can fail for this limit even if your plaintext meets the other requirements.
-// The PackedPolicySize response element indicates by percentage how close the
-// policies and tags for your request are to the upper size limit.
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has
+// a separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the
+// upper size limit.
//
// You can pass a session tag with the same key as a tag that is attached to
// the role. When you do, the session tag overrides the role tag with the same
@@ -983,11 +982,11 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
// call the operation.
//
// No permissions are required to perform this operation. If an administrator
-// adds a policy to your IAM user or role that explicitly denies access to the
-// sts:GetCallerIdentity action, you can still perform this operation. Permissions
-// are not required because the same information is returned when an IAM user
-// or role is denied access. To view an example response, see I Am Not Authorized
-// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity
+// action, you can still perform this operation. Permissions are not required
+// because the same information is returned when access is denied. To view an
+// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
// in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1062,18 +1061,26 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// GetFederationToken API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials (consisting of an access
-// key ID, a secret access key, and a security token) for a federated user.
-// A typical use is in a proxy application that gets temporary security credentials
-// on behalf of distributed applications inside a corporate network. You must
-// call the GetFederationToken operation using the long-term security credentials
-// of an IAM user. As a result, this call is appropriate in contexts where those
-// credentials can be safely stored, usually in a server-based application.
+// key ID, a secret access key, and a security token) for a user. A typical
+// use is in a proxy application that gets temporary security credentials on
+// behalf of distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based application.
// For a comparison of GetFederationToken with the other API operations that
// produce temporary credentials, see Requesting Temporary Security Credentials
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
+// Although it is possible to call GetFederationToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user that
+// you create for the purpose of a proxy application, we do not recommend it.
+// For more information, see Safeguard your root user credentials and don't
+// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
+//
// You can create a mobile-based or browser-based app that can authenticate
// users using a web identity provider like Login with Amazon, Facebook, Google,
// or an OpenID Connect-compatible identity provider. In this case, we recommend
@@ -1082,37 +1089,31 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
// in the IAM User Guide.
//
-// You can also call GetFederationToken using the security credentials of an
-// Amazon Web Services account root user, but we do not recommend it. Instead,
-// we recommend that you create an IAM user for the purpose of the proxy application.
-// Then attach a policy to the IAM user that limits federated users to only
-// the actions and resources that they need to access. For more information,
-// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
-// in the IAM User Guide.
-//
// # Session duration
//
// The temporary credentials are valid for the specified duration, from 900
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
// session duration is 43,200 seconds (12 hours). Temporary credentials obtained
-// by using the Amazon Web Services account root user credentials have a maximum
-// duration of 3,600 seconds (1 hour).
+// by using the root user credentials have a maximum duration of 3,600 seconds
+// (1 hour).
//
// # Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service except the following:
+// Amazon Web Services service with the following exceptions:
//
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
-// API.
+// API. This limitation does not apply to console sessions.
//
// - You cannot call any STS operations except GetCallerIdentity.
//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to
-// use as managed session policies. The plaintext that you use for both inline
-// and managed session policies can't exceed 2,048 characters.
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048 characters.
//
// Though the session policy parameters are optional, if you do not pass a policy,
// then the resulting federated user session has no permissions. When you pass
@@ -1264,12 +1265,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// or IAM user. The credentials consist of an access key ID, a secret access
// key, and a security token. Typically, you use GetSessionToken if you want
// to use MFA to protect programmatic calls to specific Amazon Web Services
-// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would
-// need to call GetSessionToken and submit an MFA code that is associated with
-// their MFA device. Using the temporary security credentials that are returned
-// from the call, IAM users can then make programmatic calls to API operations
-// that require MFA authentication. If you do not supply a correct MFA code,
-// then the API returns an access denied error. For a comparison of GetSessionToken
+// API operations like Amazon EC2 StopInstances.
+//
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that the call returns, IAM users can then make programmatic calls to API
+// operations that require MFA authentication. An incorrect MFA code causes
+// the API to return an access denied error. For a comparison of GetSessionToken
// with the other API operations that produce temporary credentials, see Requesting
// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
@@ -1284,13 +1286,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// # Session Duration
//
// The GetSessionToken operation must be called by using the long-term Amazon
-// Web Services security credentials of the Amazon Web Services account root
-// user or an IAM user. Credentials that are created by IAM users are valid
-// for the duration that you specify. This duration can range from 900 seconds
-// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default
-// of 43,200 seconds (12 hours). Credentials based on account credentials can
-// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
-// default of 1 hour.
+// Web Services security credentials of an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify. This duration can
+// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36
+// hours), with a default of 43,200 seconds (12 hours). Credentials based on
+// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds
+// (1 hour), with a default of 1 hour.
//
// # Permissions
//
@@ -1302,20 +1303,20 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
//
// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
-// We recommend that you do not call GetSessionToken with Amazon Web Services
-// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
-// by creating one or more IAM users, giving them the necessary permissions,
-// and using IAM users for everyday interaction with Amazon Web Services.
+// The credentials that GetSessionToken returns are based on permissions associated
+// with the IAM user whose credentials were used to call the operation. The
+// temporary credentials have the same permissions as the IAM user.
//
-// The credentials that are returned by GetSessionToken are based on permissions
-// associated with the user whose credentials were used to call the operation.
-// If GetSessionToken is called using Amazon Web Services account root user
-// credentials, the temporary credentials have root user permissions. Similarly,
-// if GetSessionToken is called using the credentials of an IAM user, the temporary
-// credentials have the same permissions as the IAM user.
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do
+// not recommend it. If GetSessionToken is called using root user credentials,
+// the temporary credentials have root user permissions. For more information,
+// see Safeguard your root user credentials and don't use them for everyday
+// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide
//
// For more information about using GetSessionToken to create temporary credentials,
-// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
// in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1424,11 +1425,12 @@ type AssumeRoleInput struct {
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
// return (\u000D) characters.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1441,11 +1443,12 @@ type AssumeRoleInput struct {
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the Amazon Web Services General Reference.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -1457,6 +1460,17 @@ type AssumeRoleInput struct {
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
+ // A list of previously acquired trusted context assertions in the format of
+ // a JSON array. The trusted context assertion is signed and encrypted by Amazon
+ // Web Services STS.
+ //
+ // The following is an example of a ProvidedContext value that includes a single
+ // trusted context assertion and the ARN of the context provider from which
+ // the trusted context assertion was generated.
+ //
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
+ ProvidedContexts []*ProvidedContext `type:"list"`
+
// The Amazon Resource Name (ARN) of the role to assume.
//
// RoleArn is a required field
@@ -1520,11 +1534,12 @@ type AssumeRoleInput struct {
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
//
// You can pass a session tag with the same key as a tag that is already attached
// to the role. When you do, session tags override a role tag with the same
@@ -1629,6 +1644,16 @@ func (s *AssumeRoleInput) Validate() error {
}
}
}
+ if s.ProvidedContexts != nil {
+ for i, v := range s.ProvidedContexts {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
if s.Tags != nil {
for i, v := range s.Tags {
if v == nil {
@@ -1670,6 +1695,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn
return s
}
+// SetProvidedContexts sets the ProvidedContexts field's value.
+func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput {
+ s.ProvidedContexts = v
+ return s
+}
+
// SetRoleArn sets the RoleArn field's value.
func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
s.RoleArn = &v
@@ -1843,11 +1874,12 @@ type AssumeRoleWithSAMLInput struct {
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
// return (\u000D) characters.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -1860,11 +1892,12 @@ type AssumeRoleWithSAMLInput struct {
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the Amazon Web Services General Reference.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -1892,8 +1925,12 @@ type AssumeRoleWithSAMLInput struct {
// For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
// in the IAM User Guide.
//
+ // SAMLAssertion is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's
+ // String and GoString methods.
+ //
// SAMLAssertion is a required field
- SAMLAssertion *string `min:"4" type:"string" required:"true"`
+ SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation.
@@ -2028,7 +2065,7 @@ type AssumeRoleWithSAMLOutput struct {
// IAM.
//
// The combination of NameQualifier and Subject can be used to uniquely identify
- // a federated user.
+ // a user.
//
// The following pseudocode shows how the hash value is calculated:
//
@@ -2190,11 +2227,12 @@ type AssumeRoleWithWebIdentityInput struct {
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
// return (\u000D) characters.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -2207,11 +2245,12 @@ type AssumeRoleWithWebIdentityInput struct {
// Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the Amazon Web Services General Reference.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
//
// Passing policies to this operation returns new temporary credentials. The
// resulting session's permissions are the intersection of the role's identity-based
@@ -2254,10 +2293,15 @@ type AssumeRoleWithWebIdentityInput struct {
// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
// the identity provider. Your application must get this token by authenticating
// the user who is using your application with a web identity provider before
- // the application makes an AssumeRoleWithWebIdentity call.
+ // the application makes an AssumeRoleWithWebIdentity call. Only tokens with
+ // RSA algorithms (RS256) are supported.
+ //
+ // WebIdentityToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's
+ // String and GoString methods.
//
// WebIdentityToken is a required field
- WebIdentityToken *string `min:"4" type:"string" required:"true"`
+ WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation.
@@ -2563,8 +2607,12 @@ type Credentials struct {
// The secret access key that can be used to sign requests.
//
+ // SecretAccessKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by Credentials's
+ // String and GoString methods.
+ //
// SecretAccessKey is a required field
- SecretAccessKey *string `type:"string" required:"true"`
+ SecretAccessKey *string `type:"string" required:"true" sensitive:"true"`
// The token that users must pass to the service API to use the temporary credentials.
//
@@ -2912,10 +2960,9 @@ type GetFederationTokenInput struct {
// The duration, in seconds, that the session should last. Acceptable durations
// for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
// (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
- // using Amazon Web Services account root user credentials are restricted to
- // a maximum of 3,600 seconds (one hour). If the specified duration is longer
- // than one hour, the session obtained by using root user credentials defaults
- // to one hour.
+ // using root user credentials are restricted to a maximum of 3,600 seconds
+ // (one hour). If the specified duration is longer than one hour, the session
+ // obtained by using root user credentials defaults to one hour.
DurationSeconds *int64 `min:"900" type:"integer"`
// The name of the federated user. The name is used as an identifier for the
@@ -2934,8 +2981,8 @@ type GetFederationTokenInput struct {
//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policies to
- // use as managed session policies.
+ // inline session policy. You can also specify up to 10 managed policy Amazon
+ // Resource Names (ARNs) to use as managed session policies.
//
// This parameter is optional. However, if you do not pass any session policies,
// then the resulting federated user session has no permissions.
@@ -2960,11 +3007,12 @@ type GetFederationTokenInput struct {
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
// return (\u000D) characters.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
Policy *string `min:"1" type:"string"`
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want
@@ -2973,11 +3021,12 @@ type GetFederationTokenInput struct {
//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policies to
- // use as managed session policies. The plaintext that you use for both inline
- // and managed session policies can't exceed 2,048 characters. You can provide
- // up to 10 managed policy ARNs. For more information about ARNs, see Amazon
- // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // inline session policy. You can also specify up to 10 managed policy Amazon
+ // Resource Names (ARNs) to use as managed session policies. The plaintext that
+ // you use for both inline and managed session policies can't exceed 2,048 characters.
+ // You can provide up to 10 managed policy ARNs. For more information about
+ // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
+ // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
// in the Amazon Web Services General Reference.
//
// This parameter is optional. However, if you do not pass any session policies,
@@ -2997,11 +3046,12 @@ type GetFederationTokenInput struct {
// by the policy. These permissions are granted in addition to the permissions
// that are granted by the session policies.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
PolicyArns []*PolicyDescriptorType `type:"list"`
// A list of session tags. Each session tag consists of a key name and an associated
@@ -3015,11 +3065,12 @@ type GetFederationTokenInput struct {
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
- // An Amazon Web Services conversion compresses the passed session policies
- // and session tags into a packed binary format that has a separate limit. Your
- // request can fail for this limit even if your plaintext meets the other requirements.
- // The PackedPolicySize response element indicates by percentage how close the
- // policies and tags for your request are to the upper size limit.
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has
+ // a separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the
+ // upper size limit.
//
// You can pass a session tag with the same key as a tag that is already attached
// to the user you are federating. When you do, session tags override a user
@@ -3362,6 +3413,67 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
return s
}
+// Contains information about the provided context. This includes the signed
+// and encrypted trusted context assertion and the context provider ARN from
+// which the trusted context assertion was generated.
+type ProvidedContext struct {
+ _ struct{} `type:"structure"`
+
+ // The signed and encrypted trusted context assertion generated by the context
+ // provider. The trusted context assertion is signed and encrypted by Amazon
+ // Web Services STS.
+ ContextAssertion *string `min:"4" type:"string"`
+
+ // The context provider ARN from which the trusted context assertion was generated.
+ ProviderArn *string `min:"20" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProvidedContext) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ProvidedContext) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ProvidedContext) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"}
+ if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4))
+ }
+ if s.ProviderArn != nil && len(*s.ProviderArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContextAssertion sets the ContextAssertion field's value.
+func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext {
+ s.ContextAssertion = &v
+ return s
+}
+
+// SetProviderArn sets the ProviderArn field's value.
+func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext {
+ s.ProviderArn = &v
+ return s
+}
+
// You can pass custom key-value pair attributes when you assume a role or federate
// a user. These are called session tags. You can then use the session tags
// to control access to resources. For more information, see Tagging Amazon
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index c40f5a2a5..ea1d9eb0c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -4,10 +4,9 @@
// requests to AWS Security Token Service.
//
// Security Token Service (STS) enables you to request temporary, limited-privilege
-// credentials for Identity and Access Management (IAM) users or for users that
-// you authenticate (federated users). This guide provides descriptions of the
-// STS API. For more information about using this service, see Temporary Security
-// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+// credentials for users. This guide provides descriptions of the STS API. For
+// more information about using this service, see Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//
diff --git a/vendor/github.com/cncf/xds/go/LICENSE b/vendor/github.com/cncf/xds/go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
new file mode 100644
index 000000000..3c751b6ca
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
@@ -0,0 +1,411 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/migrate.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+}
+
+func (x *MigrateAnnotation) Reset() {
+ *x = MigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MigrateAnnotation) ProtoMessage() {}
+
+func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*MigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+type FieldMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"`
+}
+
+func (x *FieldMigrateAnnotation) Reset() {
+ *x = FieldMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMigrateAnnotation) ProtoMessage() {}
+
+func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FieldMigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+func (x *FieldMigrateAnnotation) GetOneofPromotion() string {
+ if x != nil {
+ return x.OneofPromotion
+ }
+ return ""
+}
+
+type FileMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"`
+}
+
+func (x *FileMigrateAnnotation) Reset() {
+ *x = FileMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileMigrateAnnotation) ProtoMessage() {}
+
+func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FileMigrateAnnotation) GetMoveToPackage() string {
+ if x != nil {
+ return x.MoveToPackage
+ }
+ return ""
+}
+
+var file_udpa_annotations_migrate_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.message_migrate",
+ Tag: "bytes,171962766,opt,name=message_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.field_migrate",
+ Tag: "bytes,171962766,opt,name=field_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_migrate",
+ Tag: "bytes,171962766,opt,name=enum_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_value_migrate",
+ Tag: "bytes,171962766,opt,name=enum_value_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.file_migrate",
+ Tag: "bytes,171962766,opt,name=file_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation message_migrate = 171962766;
+ E_MessageMigrate = &file_udpa_annotations_migrate_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional udpa.annotations.FieldMigrateAnnotation field_migrate = 171962766;
+ E_FieldMigrate = &file_udpa_annotations_migrate_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation enum_migrate = 171962766;
+ E_EnumMigrate = &file_udpa_annotations_migrate_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation enum_value_migrate = 171962766;
+ E_EnumValueMigrate = &file_udpa_annotations_migrate_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional udpa.annotations.FileMigrateAnnotation file_migrate = 171962766;
+ E_FileMigrate = &file_udpa_annotations_migrate_proto_extTypes[4]
+)
+
+var File_udpa_annotations_migrate_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_migrate_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74,
+ 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f,
+ 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x6e,
+ 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3f, 0x0a, 0x15,
+ 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x74, 0x6f,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x3a, 0x70, 0x0a,
+ 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70,
+ 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a,
+ 0x6f, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x3a, 0x67, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x8e,
+ 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e,
+ 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x77, 0x0a, 0x12, 0x65, 0x6e, 0x75,
+ 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64,
+ 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x10, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x3a, 0x6b, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x75, 0x64, 0x70, 0x61,
+ 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x6c,
+ 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42,
+ 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_migrate_proto_rawDescOnce sync.Once
+ file_udpa_annotations_migrate_proto_rawDescData = file_udpa_annotations_migrate_proto_rawDesc
+)
+
+func file_udpa_annotations_migrate_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_migrate_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_migrate_proto_rawDescData)
+ })
+ return file_udpa_annotations_migrate_proto_rawDescData
+}
+
+var file_udpa_annotations_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_udpa_annotations_migrate_proto_goTypes = []interface{}{
+ (*MigrateAnnotation)(nil), // 0: udpa.annotations.MigrateAnnotation
+ (*FieldMigrateAnnotation)(nil), // 1: udpa.annotations.FieldMigrateAnnotation
+ (*FileMigrateAnnotation)(nil), // 2: udpa.annotations.FileMigrateAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions
+ (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions
+}
+var file_udpa_annotations_migrate_proto_depIdxs = []int32{
+ 3, // 0: udpa.annotations.message_migrate:extendee -> google.protobuf.MessageOptions
+ 4, // 1: udpa.annotations.field_migrate:extendee -> google.protobuf.FieldOptions
+ 5, // 2: udpa.annotations.enum_migrate:extendee -> google.protobuf.EnumOptions
+ 6, // 3: udpa.annotations.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions
+ 7, // 4: udpa.annotations.file_migrate:extendee -> google.protobuf.FileOptions
+ 0, // 5: udpa.annotations.message_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 1, // 6: udpa.annotations.field_migrate:type_name -> udpa.annotations.FieldMigrateAnnotation
+ 0, // 7: udpa.annotations.enum_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 0, // 8: udpa.annotations.enum_value_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 2, // 9: udpa.annotations.file_migrate:type_name -> udpa.annotations.FileMigrateAnnotation
+ 10, // [10:10] is the sub-list for method output_type
+ 10, // [10:10] is the sub-list for method input_type
+ 5, // [5:10] is the sub-list for extension type_name
+ 0, // [0:5] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_migrate_proto_init() }
+func file_udpa_annotations_migrate_proto_init() {
+ if File_udpa_annotations_migrate_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_udpa_annotations_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_udpa_annotations_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_migrate_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 5,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_migrate_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_migrate_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_migrate_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_migrate_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_migrate_proto = out.File
+ file_udpa_annotations_migrate_proto_rawDesc = nil
+ file_udpa_annotations_migrate_proto_goTypes = nil
+ file_udpa_annotations_migrate_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
new file mode 100644
index 000000000..38196d5eb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
@@ -0,0 +1,350 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/migrate.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MigrateAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// MigrateAnnotationValidationError is the validation error returned by
+// MigrateAnnotation.Validate if the designated constraints aren't met.
+type MigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MigrateAnnotationValidationError) ErrorName() string {
+ return "MigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MigrateAnnotationValidationError{}
+
+// Validate checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ // no validation rules for OneofPromotion
+
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldMigrateAnnotationValidationError is the validation error returned by
+// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
+type FieldMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldMigrateAnnotationValidationError) ErrorName() string {
+ return "FieldMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldMigrateAnnotationValidationError{}
+
+// Validate checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for MoveToPackage
+
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FileMigrateAnnotationValidationError is the validation error returned by
+// FileMigrateAnnotation.Validate if the designated constraints aren't met.
+type FileMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileMigrateAnnotationValidationError) ErrorName() string {
+ return "FileMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileMigrateAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
new file mode 100644
index 000000000..7c8339919
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
@@ -0,0 +1,196 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/security.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type FieldSecurityAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"`
+ ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"`
+}
+
+func (x *FieldSecurityAnnotation) Reset() {
+ *x = FieldSecurityAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_security_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldSecurityAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldSecurityAnnotation) ProtoMessage() {}
+
+func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_security_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_security_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedDownstream
+ }
+ return false
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedUpstream
+ }
+ return false
+}
+
+var file_udpa_annotations_security_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldSecurityAnnotation)(nil),
+ Field: 11122993,
+ Name: "udpa.annotations.security",
+ Tag: "bytes,11122993,opt,name=security",
+ Filename: "udpa/annotations/security.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional udpa.annotations.FieldSecurityAnnotation security = 11122993;
+ E_Security = &file_udpa_annotations_security_proto_extTypes[0]
+)
+
+var File_udpa_annotations_security_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_security_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f,
+ 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73,
+ 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a,
+ 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75,
+ 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x55, 0x70,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x67, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0xb1, 0xf2, 0xa6, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x75, 0x64, 0x70,
+ 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42,
+ 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_security_proto_rawDescOnce sync.Once
+ file_udpa_annotations_security_proto_rawDescData = file_udpa_annotations_security_proto_rawDesc
+)
+
+func file_udpa_annotations_security_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_security_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_security_proto_rawDescData)
+ })
+ return file_udpa_annotations_security_proto_rawDescData
+}
+
+var file_udpa_annotations_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_security_proto_goTypes = []interface{}{
+ (*FieldSecurityAnnotation)(nil), // 0: udpa.annotations.FieldSecurityAnnotation
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_udpa_annotations_security_proto_depIdxs = []int32{
+ 1, // 0: udpa.annotations.security:extendee -> google.protobuf.FieldOptions
+ 0, // 1: udpa.annotations.security:type_name -> udpa.annotations.FieldSecurityAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_security_proto_init() }
+func file_udpa_annotations_security_proto_init() {
+ if File_udpa_annotations_security_proto != nil {
+ return
+ }
+ file_udpa_annotations_status_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldSecurityAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_security_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_security_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_security_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_security_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_security_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_security_proto = out.File
+ file_udpa_annotations_security_proto_rawDesc = nil
+ file_udpa_annotations_security_proto_goTypes = nil
+ file_udpa_annotations_security_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
new file mode 100644
index 000000000..acc9bd7a1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/security.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FieldSecurityAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ConfigureForUntrustedDownstream
+
+ // no validation rules for ConfigureForUntrustedUpstream
+
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldSecurityAnnotationValidationError is the validation error returned by
+// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
+type FieldSecurityAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldSecurityAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldSecurityAnnotationValidationError) ErrorName() string {
+ return "FieldSecurityAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldSecurityAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldSecurityAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldSecurityAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldSecurityAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
new file mode 100644
index 000000000..e2b1a59cb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
@@ -0,0 +1,93 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/sensitive.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_udpa_annotations_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 76569463,
+ Name: "udpa.annotations.sensitive",
+ Tag: "varint,76569463,opt,name=sensitive",
+ Filename: "udpa/annotations/sensitive.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool sensitive = 76569463;
+ E_Sensitive = &file_udpa_annotations_sensitive_proto_extTypes[0]
+)
+
+var File_udpa_annotations_sensitive_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_sensitive_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf7, 0xb6, 0xc1, 0x24, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e,
+ 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_udpa_annotations_sensitive_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+}
+var file_udpa_annotations_sensitive_proto_depIdxs = []int32{
+ 0, // 0: udpa.annotations.sensitive:extendee -> google.protobuf.FieldOptions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_sensitive_proto_init() }
+func file_udpa_annotations_sensitive_proto_init() {
+ if File_udpa_annotations_sensitive_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_sensitive_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_sensitive_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_sensitive_proto_depIdxs,
+ ExtensionInfos: file_udpa_annotations_sensitive_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_sensitive_proto = out.File
+ file_udpa_annotations_sensitive_proto_rawDesc = nil
+ file_udpa_annotations_sensitive_proto_goTypes = nil
+ file_udpa_annotations_sensitive_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
new file mode 100644
index 000000000..f3fa61974
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
@@ -0,0 +1,36 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/sensitive.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
new file mode 100644
index 000000000..cf629f751
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
@@ -0,0 +1,253 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/status.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PackageVersionStatus int32
+
+const (
+ PackageVersionStatus_UNKNOWN PackageVersionStatus = 0
+ PackageVersionStatus_FROZEN PackageVersionStatus = 1
+ PackageVersionStatus_ACTIVE PackageVersionStatus = 2
+ PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3
+)
+
+// Enum value maps for PackageVersionStatus.
+var (
+ PackageVersionStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "FROZEN",
+ 2: "ACTIVE",
+ 3: "NEXT_MAJOR_VERSION_CANDIDATE",
+ }
+ PackageVersionStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "FROZEN": 1,
+ "ACTIVE": 2,
+ "NEXT_MAJOR_VERSION_CANDIDATE": 3,
+ }
+)
+
+func (x PackageVersionStatus) Enum() *PackageVersionStatus {
+ p := new(PackageVersionStatus)
+ *p = x
+ return p
+}
+
+func (x PackageVersionStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_udpa_annotations_status_proto_enumTypes[0].Descriptor()
+}
+
+func (PackageVersionStatus) Type() protoreflect.EnumType {
+ return &file_udpa_annotations_status_proto_enumTypes[0]
+}
+
+func (x PackageVersionStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PackageVersionStatus.Descriptor instead.
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) {
+ return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0}
+}
+
+type StatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+ PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=udpa.annotations.PackageVersionStatus" json:"package_version_status,omitempty"`
+}
+
+func (x *StatusAnnotation) Reset() {
+ *x = StatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusAnnotation) ProtoMessage() {}
+
+func (x *StatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead.
+func (*StatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *StatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus {
+ if x != nil {
+ return x.PackageVersionStatus
+ }
+ return PackageVersionStatus_UNKNOWN
+}
+
+var file_udpa_annotations_status_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*StatusAnnotation)(nil),
+ Field: 222707719,
+ Name: "udpa.annotations.file_status",
+ Tag: "bytes,222707719,opt,name=file_status",
+ Filename: "udpa/annotations/status.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional udpa.annotations.StatusAnnotation file_status = 222707719;
+ E_FileStatus = &file_udpa_annotations_status_proto_extTypes[0]
+)
+
+var File_udpa_annotations_status_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_status_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b,
+ 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
+ 0x73, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a,
+ 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a,
+ 0x64, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x87, 0x80, 0x99,
+ 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_status_proto_rawDescOnce sync.Once
+ file_udpa_annotations_status_proto_rawDescData = file_udpa_annotations_status_proto_rawDesc
+)
+
+func file_udpa_annotations_status_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_status_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_status_proto_rawDescData)
+ })
+ return file_udpa_annotations_status_proto_rawDescData
+}
+
+var file_udpa_annotations_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_udpa_annotations_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_status_proto_goTypes = []interface{}{
+ (PackageVersionStatus)(0), // 0: udpa.annotations.PackageVersionStatus
+ (*StatusAnnotation)(nil), // 1: udpa.annotations.StatusAnnotation
+ (*descriptorpb.FileOptions)(nil), // 2: google.protobuf.FileOptions
+}
+var file_udpa_annotations_status_proto_depIdxs = []int32{
+ 0, // 0: udpa.annotations.StatusAnnotation.package_version_status:type_name -> udpa.annotations.PackageVersionStatus
+ 2, // 1: udpa.annotations.file_status:extendee -> google.protobuf.FileOptions
+ 1, // 2: udpa.annotations.file_status:type_name -> udpa.annotations.StatusAnnotation
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 2, // [2:3] is the sub-list for extension type_name
+ 1, // [1:2] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_status_proto_init() }
+func file_udpa_annotations_status_proto_init() {
+ if File_udpa_annotations_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_status_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_status_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_status_proto_depIdxs,
+ EnumInfos: file_udpa_annotations_status_proto_enumTypes,
+ MessageInfos: file_udpa_annotations_status_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_status_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_status_proto = out.File
+ file_udpa_annotations_status_proto_rawDesc = nil
+ file_udpa_annotations_status_proto_goTypes = nil
+ file_udpa_annotations_status_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
new file mode 100644
index 000000000..5633a8383
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/status.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on StatusAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ // no validation rules for PackageVersionStatus
+
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
+// StatusAnnotationValidationError is the validation error returned by
+// StatusAnnotation.Validate if the designated constraints aren't met.
+type StatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
new file mode 100644
index 000000000..8bd950f6b
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/versioning.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type VersioningAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"`
+}
+
+func (x *VersioningAnnotation) Reset() {
+ *x = VersioningAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_versioning_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VersioningAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VersioningAnnotation) ProtoMessage() {}
+
+func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_versioning_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead.
+func (*VersioningAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_versioning_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *VersioningAnnotation) GetPreviousMessageType() string {
+ if x != nil {
+ return x.PreviousMessageType
+ }
+ return ""
+}
+
+var file_udpa_annotations_versioning_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*VersioningAnnotation)(nil),
+ Field: 7881811,
+ Name: "udpa.annotations.versioning",
+ Tag: "bytes,7881811,opt,name=versioning",
+ Filename: "udpa/annotations/versioning.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional udpa.annotations.VersioningAnnotation versioning = 7881811;
+ E_Versioning = &file_udpa_annotations_versioning_proto_extTypes[0]
+)
+
+var File_udpa_annotations_versioning_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_versioning_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
+ 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xd3, 0x88, 0xe1, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x75, 0x64,
+ 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42,
+ 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_versioning_proto_rawDescOnce sync.Once
+ file_udpa_annotations_versioning_proto_rawDescData = file_udpa_annotations_versioning_proto_rawDesc
+)
+
+func file_udpa_annotations_versioning_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_versioning_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_versioning_proto_rawDescData)
+ })
+ return file_udpa_annotations_versioning_proto_rawDescData
+}
+
+var file_udpa_annotations_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_versioning_proto_goTypes = []interface{}{
+ (*VersioningAnnotation)(nil), // 0: udpa.annotations.VersioningAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions
+}
+var file_udpa_annotations_versioning_proto_depIdxs = []int32{
+ 1, // 0: udpa.annotations.versioning:extendee -> google.protobuf.MessageOptions
+ 0, // 1: udpa.annotations.versioning:type_name -> udpa.annotations.VersioningAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_versioning_proto_init() }
+func file_udpa_annotations_versioning_proto_init() {
+ if File_udpa_annotations_versioning_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VersioningAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_versioning_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_versioning_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_versioning_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_versioning_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_versioning_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_versioning_proto = out.File
+ file_udpa_annotations_versioning_proto_rawDesc = nil
+ file_udpa_annotations_versioning_proto_goTypes = nil
+ file_udpa_annotations_versioning_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
new file mode 100644
index 000000000..5fd86baff
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/versioning.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for PreviousMessageType
+
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
+// VersioningAnnotationValidationError is the validation error returned by
+// VersioningAnnotation.Validate if the designated constraints aren't met.
+type VersioningAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VersioningAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VersioningAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VersioningAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VersioningAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VersioningAnnotationValidationError) ErrorName() string {
+ return "VersioningAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e VersioningAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVersioningAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VersioningAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VersioningAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go
new file mode 100644
index 000000000..8eb3b7b24
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/type/v1/typed_struct.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *TypedStruct) Reset() {
+ *x = TypedStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedStruct) ProtoMessage() {}
+
+func (x *TypedStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead.
+func (*TypedStruct) Descriptor() ([]byte, []int) {
+ return file_udpa_type_v1_typed_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedStruct) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *TypedStruct) GetValue() *structpb.Struct {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_udpa_type_v1_typed_struct_proto protoreflect.FileDescriptor
+
+var file_udpa_type_v1_typed_struct_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0c, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x1a,
+ 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a,
+ 0x0b, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_type_v1_typed_struct_proto_rawDescOnce sync.Once
+ file_udpa_type_v1_typed_struct_proto_rawDescData = file_udpa_type_v1_typed_struct_proto_rawDesc
+)
+
+func file_udpa_type_v1_typed_struct_proto_rawDescGZIP() []byte {
+ file_udpa_type_v1_typed_struct_proto_rawDescOnce.Do(func() {
+ file_udpa_type_v1_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_type_v1_typed_struct_proto_rawDescData)
+ })
+ return file_udpa_type_v1_typed_struct_proto_rawDescData
+}
+
+var file_udpa_type_v1_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_type_v1_typed_struct_proto_goTypes = []interface{}{
+ (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct
+ (*structpb.Struct)(nil), // 1: google.protobuf.Struct
+}
+var file_udpa_type_v1_typed_struct_proto_depIdxs = []int32{
+ 1, // 0: udpa.type.v1.TypedStruct.value:type_name -> google.protobuf.Struct
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_udpa_type_v1_typed_struct_proto_init() }
+func file_udpa_type_v1_typed_struct_proto_init() {
+ if File_udpa_type_v1_typed_struct_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_type_v1_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_type_v1_typed_struct_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_type_v1_typed_struct_proto_goTypes,
+ DependencyIndexes: file_udpa_type_v1_typed_struct_proto_depIdxs,
+ MessageInfos: file_udpa_type_v1_typed_struct_proto_msgTypes,
+ }.Build()
+ File_udpa_type_v1_typed_struct_proto = out.File
+ file_udpa_type_v1_typed_struct_proto_rawDesc = nil
+ file_udpa_type_v1_typed_struct_proto_goTypes = nil
+ file_udpa_type_v1_typed_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go
new file mode 100644
index 000000000..e336fb4a7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go
@@ -0,0 +1,166 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/type/v1/typed_struct.proto
+
+package v1
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TypedStruct) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TypedStructMultiError, or
+// nil if none found.
+func (m *TypedStruct) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedStruct) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TypeUrl
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TypedStructMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedStructMultiError is an error wrapping multiple validation errors
+// returned by TypedStruct.ValidateAll() if the designated constraints aren't met.
+type TypedStructMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedStructMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedStructMultiError) AllErrors() []error { return m }
+
+// TypedStructValidationError is the validation error returned by
+// TypedStruct.Validate if the designated constraints aren't met.
+type TypedStructValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedStructValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedStructValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedStructValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedStructValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TypedStructValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedStruct.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedStructValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedStructValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
new file mode 100644
index 000000000..5211b83c7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
@@ -0,0 +1,412 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/migrate.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+}
+
+func (x *MigrateAnnotation) Reset() {
+ *x = MigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MigrateAnnotation) ProtoMessage() {}
+
+func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*MigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+type FieldMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"`
+}
+
+func (x *FieldMigrateAnnotation) Reset() {
+ *x = FieldMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMigrateAnnotation) ProtoMessage() {}
+
+func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FieldMigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+func (x *FieldMigrateAnnotation) GetOneofPromotion() string {
+ if x != nil {
+ return x.OneofPromotion
+ }
+ return ""
+}
+
+type FileMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"`
+}
+
+func (x *FileMigrateAnnotation) Reset() {
+ *x = FileMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileMigrateAnnotation) ProtoMessage() {}
+
+func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FileMigrateAnnotation) GetMoveToPackage() string {
+ if x != nil {
+ return x.MoveToPackage
+ }
+ return ""
+}
+
+var file_xds_annotations_v3_migrate_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.message_migrate",
+ Tag: "bytes,112948430,opt,name=message_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldMigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.field_migrate",
+ Tag: "bytes,112948430,opt,name=field_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.enum_migrate",
+ Tag: "bytes,112948430,opt,name=enum_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.enum_value_migrate",
+ Tag: "bytes,112948430,opt,name=enum_value_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileMigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.file_migrate",
+ Tag: "bytes,112948430,opt,name=file_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation message_migrate = 112948430;
+ E_MessageMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldMigrateAnnotation field_migrate = 112948430;
+ E_FieldMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation enum_migrate = 112948430;
+ E_EnumMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation enum_value_migrate = 112948430;
+ E_EnumValueMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional xds.annotations.v3.FileMigrateAnnotation file_migrate = 112948430;
+ E_FileMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[4]
+)
+
+var File_xds_annotations_v3_migrate_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_migrate_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72,
+ 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a,
+ 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0e, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e,
+ 0x22, 0x3f, 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76,
+ 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+ 0x65, 0x3a, 0x72, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67,
+ 0x72, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x71, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x69, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d,
+ 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72,
+ 0x61, 0x74, 0x65, 0x3a, 0x79, 0x0a, 0x12, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed,
+ 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x6e,
+ 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x6d,
+ 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed,
+ 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, 0x2b, 0x5a,
+ 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_migrate_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_migrate_proto_rawDescData = file_xds_annotations_v3_migrate_proto_rawDesc
+)
+
+func file_xds_annotations_v3_migrate_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_migrate_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_migrate_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_migrate_proto_rawDescData
+}
+
+var file_xds_annotations_v3_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_xds_annotations_v3_migrate_proto_goTypes = []interface{}{
+ (*MigrateAnnotation)(nil), // 0: xds.annotations.v3.MigrateAnnotation
+ (*FieldMigrateAnnotation)(nil), // 1: xds.annotations.v3.FieldMigrateAnnotation
+ (*FileMigrateAnnotation)(nil), // 2: xds.annotations.v3.FileMigrateAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions
+ (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions
+}
+var file_xds_annotations_v3_migrate_proto_depIdxs = []int32{
+ 3, // 0: xds.annotations.v3.message_migrate:extendee -> google.protobuf.MessageOptions
+ 4, // 1: xds.annotations.v3.field_migrate:extendee -> google.protobuf.FieldOptions
+ 5, // 2: xds.annotations.v3.enum_migrate:extendee -> google.protobuf.EnumOptions
+ 6, // 3: xds.annotations.v3.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions
+ 7, // 4: xds.annotations.v3.file_migrate:extendee -> google.protobuf.FileOptions
+ 0, // 5: xds.annotations.v3.message_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 1, // 6: xds.annotations.v3.field_migrate:type_name -> xds.annotations.v3.FieldMigrateAnnotation
+ 0, // 7: xds.annotations.v3.enum_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 0, // 8: xds.annotations.v3.enum_value_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 2, // 9: xds.annotations.v3.file_migrate:type_name -> xds.annotations.v3.FileMigrateAnnotation
+ 10, // [10:10] is the sub-list for method output_type
+ 10, // [10:10] is the sub-list for method input_type
+ 5, // [5:10] is the sub-list for extension type_name
+ 0, // [0:5] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_migrate_proto_init() }
+func file_xds_annotations_v3_migrate_proto_init() {
+ if File_xds_annotations_v3_migrate_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_migrate_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 5,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_migrate_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_migrate_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_migrate_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_migrate_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_migrate_proto = out.File
+ file_xds_annotations_v3_migrate_proto_rawDesc = nil
+ file_xds_annotations_v3_migrate_proto_goTypes = nil
+ file_xds_annotations_v3_migrate_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
new file mode 100644
index 000000000..d57d77824
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
@@ -0,0 +1,350 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/migrate.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MigrateAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// MigrateAnnotationValidationError is the validation error returned by
+// MigrateAnnotation.Validate if the designated constraints aren't met.
+type MigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MigrateAnnotationValidationError) ErrorName() string {
+ return "MigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MigrateAnnotationValidationError{}
+
+// Validate checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ // no validation rules for OneofPromotion
+
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldMigrateAnnotationValidationError is the validation error returned by
+// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
+type FieldMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldMigrateAnnotationValidationError) ErrorName() string {
+ return "FieldMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldMigrateAnnotationValidationError{}
+
+// Validate checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for MoveToPackage
+
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FileMigrateAnnotationValidationError is the validation error returned by
+// FileMigrateAnnotation.Validate if the designated constraints aren't met.
+type FileMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileMigrateAnnotationValidationError) ErrorName() string {
+ return "FileMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileMigrateAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
new file mode 100644
index 000000000..14df890c1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
@@ -0,0 +1,197 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/security.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type FieldSecurityAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"`
+ ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"`
+}
+
+func (x *FieldSecurityAnnotation) Reset() {
+ *x = FieldSecurityAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_security_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldSecurityAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldSecurityAnnotation) ProtoMessage() {}
+
+func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_security_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_security_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedDownstream
+ }
+ return false
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedUpstream
+ }
+ return false
+}
+
+var file_xds_annotations_v3_security_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldSecurityAnnotation)(nil),
+ Field: 99044135,
+ Name: "xds.annotations.v3.security",
+ Tag: "bytes,99044135,opt,name=security",
+ Filename: "xds/annotations/v3/security.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldSecurityAnnotation security = 99044135;
+ E_Security = &file_xds_annotations_v3_security_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_security_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_security_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65,
+ 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72,
+ 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
+ 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75,
+ 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75,
+ 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x69, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa7, 0x96, 0x9d, 0x2f, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08,
+ 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_security_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_security_proto_rawDescData = file_xds_annotations_v3_security_proto_rawDesc
+)
+
+func file_xds_annotations_v3_security_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_security_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_security_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_security_proto_rawDescData
+}
+
+var file_xds_annotations_v3_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_annotations_v3_security_proto_goTypes = []interface{}{
+ (*FieldSecurityAnnotation)(nil), // 0: xds.annotations.v3.FieldSecurityAnnotation
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_security_proto_depIdxs = []int32{
+ 1, // 0: xds.annotations.v3.security:extendee -> google.protobuf.FieldOptions
+ 0, // 1: xds.annotations.v3.security:type_name -> xds.annotations.v3.FieldSecurityAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_security_proto_init() }
+func file_xds_annotations_v3_security_proto_init() {
+ if File_xds_annotations_v3_security_proto != nil {
+ return
+ }
+ file_xds_annotations_v3_status_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldSecurityAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_security_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_security_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_security_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_security_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_security_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_security_proto = out.File
+ file_xds_annotations_v3_security_proto_rawDesc = nil
+ file_xds_annotations_v3_security_proto_goTypes = nil
+ file_xds_annotations_v3_security_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
new file mode 100644
index 000000000..ac0143f27
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/security.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FieldSecurityAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ConfigureForUntrustedDownstream
+
+ // no validation rules for ConfigureForUntrustedUpstream
+
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldSecurityAnnotationValidationError is the validation error returned by
+// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
+type FieldSecurityAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldSecurityAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldSecurityAnnotationValidationError) ErrorName() string {
+ return "FieldSecurityAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldSecurityAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldSecurityAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldSecurityAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldSecurityAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
new file mode 100644
index 000000000..042b66bff
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
@@ -0,0 +1,93 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/sensitive.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_xds_annotations_v3_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 61008053,
+ Name: "xds.annotations.v3.sensitive",
+ Tag: "varint,61008053,opt,name=sensitive",
+ Filename: "xds/annotations/v3/sensitive.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool sensitive = 61008053;
+ E_Sensitive = &file_xds_annotations_v3_sensitive_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_sensitive_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_sensitive_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65,
+ 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb5, 0xd1, 0x8b, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_xds_annotations_v3_sensitive_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_sensitive_proto_depIdxs = []int32{
+ 0, // 0: xds.annotations.v3.sensitive:extendee -> google.protobuf.FieldOptions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_sensitive_proto_init() }
+func file_xds_annotations_v3_sensitive_proto_init() {
+ if File_xds_annotations_v3_sensitive_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_sensitive_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_sensitive_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_sensitive_proto_depIdxs,
+ ExtensionInfos: file_xds_annotations_v3_sensitive_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_sensitive_proto = out.File
+ file_xds_annotations_v3_sensitive_proto_rawDesc = nil
+ file_xds_annotations_v3_sensitive_proto_goTypes = nil
+ file_xds_annotations_v3_sensitive_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
new file mode 100644
index 000000000..c101d3acc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
@@ -0,0 +1,36 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/sensitive.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
new file mode 100644
index 000000000..5d5975ffb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
@@ -0,0 +1,495 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/status.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PackageVersionStatus int32
+
+const (
+ PackageVersionStatus_UNKNOWN PackageVersionStatus = 0
+ PackageVersionStatus_FROZEN PackageVersionStatus = 1
+ PackageVersionStatus_ACTIVE PackageVersionStatus = 2
+ PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3
+)
+
+// Enum value maps for PackageVersionStatus.
+var (
+ PackageVersionStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "FROZEN",
+ 2: "ACTIVE",
+ 3: "NEXT_MAJOR_VERSION_CANDIDATE",
+ }
+ PackageVersionStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "FROZEN": 1,
+ "ACTIVE": 2,
+ "NEXT_MAJOR_VERSION_CANDIDATE": 3,
+ }
+)
+
+func (x PackageVersionStatus) Enum() *PackageVersionStatus {
+ p := new(PackageVersionStatus)
+ *p = x
+ return p
+}
+
+func (x PackageVersionStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_xds_annotations_v3_status_proto_enumTypes[0].Descriptor()
+}
+
+func (PackageVersionStatus) Type() protoreflect.EnumType {
+ return &file_xds_annotations_v3_status_proto_enumTypes[0]
+}
+
+func (x PackageVersionStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PackageVersionStatus.Descriptor instead.
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0}
+}
+
+type FileStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *FileStatusAnnotation) Reset() {
+ *x = FileStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileStatusAnnotation) ProtoMessage() {}
+
+func (x *FileStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*FileStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FileStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type MessageStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *MessageStatusAnnotation) Reset() {
+ *x = MessageStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MessageStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageStatusAnnotation) ProtoMessage() {}
+
+func (x *MessageStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*MessageStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *MessageStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type FieldStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *FieldStatusAnnotation) Reset() {
+ *x = FieldStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldStatusAnnotation) ProtoMessage() {}
+
+func (x *FieldStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FieldStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type StatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+ PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=xds.annotations.v3.PackageVersionStatus" json:"package_version_status,omitempty"`
+}
+
+func (x *StatusAnnotation) Reset() {
+ *x = StatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusAnnotation) ProtoMessage() {}
+
+func (x *StatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead.
+func (*StatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus {
+ if x != nil {
+ return x.PackageVersionStatus
+ }
+ return PackageVersionStatus_UNKNOWN
+}
+
+var file_xds_annotations_v3_status_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.file_status",
+ Tag: "bytes,226829418,opt,name=file_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MessageStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.message_status",
+ Tag: "bytes,226829418,opt,name=message_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.field_status",
+ Tag: "bytes,226829418,opt,name=field_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional xds.annotations.v3.FileStatusAnnotation file_status = 226829418;
+ E_FileStatus = &file_xds_annotations_v3_status_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.MessageStatusAnnotation message_status = 226829418;
+ E_MessageStatus = &file_xds_annotations_v3_status_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldStatusAnnotation field_status = 226829418;
+ E_FieldStatus = &file_xds_annotations_v3_status_proto_extTypes[2]
+)
+
+var File_xds_annotations_v3_status_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_status_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x40, 0x0a, 0x14, 0x46, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49,
+ 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x43, 0x0a, 0x17, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f,
+ 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
+ 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x41,
+ 0x0a, 0x15, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
+ 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69,
+ 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x12, 0x5e, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a,
+ 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a,
+ 0x6a, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94,
+ 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x76, 0x0a, 0x0e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea,
+ 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x3a, 0x6e, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_status_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_status_proto_rawDescData = file_xds_annotations_v3_status_proto_rawDesc
+)
+
+func file_xds_annotations_v3_status_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_status_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_status_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_status_proto_rawDescData
+}
+
+var file_xds_annotations_v3_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_xds_annotations_v3_status_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_xds_annotations_v3_status_proto_goTypes = []interface{}{
+ (PackageVersionStatus)(0), // 0: xds.annotations.v3.PackageVersionStatus
+ (*FileStatusAnnotation)(nil), // 1: xds.annotations.v3.FileStatusAnnotation
+ (*MessageStatusAnnotation)(nil), // 2: xds.annotations.v3.MessageStatusAnnotation
+ (*FieldStatusAnnotation)(nil), // 3: xds.annotations.v3.FieldStatusAnnotation
+ (*StatusAnnotation)(nil), // 4: xds.annotations.v3.StatusAnnotation
+ (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions
+ (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 7: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_status_proto_depIdxs = []int32{
+ 0, // 0: xds.annotations.v3.StatusAnnotation.package_version_status:type_name -> xds.annotations.v3.PackageVersionStatus
+ 5, // 1: xds.annotations.v3.file_status:extendee -> google.protobuf.FileOptions
+ 6, // 2: xds.annotations.v3.message_status:extendee -> google.protobuf.MessageOptions
+ 7, // 3: xds.annotations.v3.field_status:extendee -> google.protobuf.FieldOptions
+ 1, // 4: xds.annotations.v3.file_status:type_name -> xds.annotations.v3.FileStatusAnnotation
+ 2, // 5: xds.annotations.v3.message_status:type_name -> xds.annotations.v3.MessageStatusAnnotation
+ 3, // 6: xds.annotations.v3.field_status:type_name -> xds.annotations.v3.FieldStatusAnnotation
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 4, // [4:7] is the sub-list for extension type_name
+ 1, // [1:4] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_status_proto_init() }
+func file_xds_annotations_v3_status_proto_init() {
+ if File_xds_annotations_v3_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MessageStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_status_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_status_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_status_proto_depIdxs,
+ EnumInfos: file_xds_annotations_v3_status_proto_enumTypes,
+ MessageInfos: file_xds_annotations_v3_status_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_status_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_status_proto = out.File
+ file_xds_annotations_v3_status_proto_rawDesc = nil
+ file_xds_annotations_v3_status_proto_goTypes = nil
+ file_xds_annotations_v3_status_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
new file mode 100644
index 000000000..a87dbee8d
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
@@ -0,0 +1,452 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/status.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FileStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileStatusAnnotationMultiError, or nil if none found.
+func (m *FileStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return FileStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// FileStatusAnnotationValidationError is the validation error returned by
+// FileStatusAnnotation.Validate if the designated constraints aren't met.
+type FileStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileStatusAnnotationValidationError) ErrorName() string {
+ return "FileStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileStatusAnnotationValidationError{}
+
+// Validate checks the field values on MessageStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *MessageStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MessageStatusAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MessageStatusAnnotationMultiError, or nil if none found.
+func (m *MessageStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MessageStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return MessageStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MessageStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by MessageStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type MessageStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MessageStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// MessageStatusAnnotationValidationError is the validation error returned by
+// MessageStatusAnnotation.Validate if the designated constraints aren't met.
+type MessageStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MessageStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MessageStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MessageStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MessageStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MessageStatusAnnotationValidationError) ErrorName() string {
+ return "MessageStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MessageStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMessageStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MessageStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MessageStatusAnnotationValidationError{}
+
+// Validate checks the field values on FieldStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldStatusAnnotationMultiError, or nil if none found.
+func (m *FieldStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return FieldStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldStatusAnnotationValidationError is the validation error returned by
+// FieldStatusAnnotation.Validate if the designated constraints aren't met.
+type FieldStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldStatusAnnotationValidationError) ErrorName() string {
+ return "FieldStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldStatusAnnotationValidationError{}
+
+// Validate checks the field values on StatusAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ // no validation rules for PackageVersionStatus
+
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
+// StatusAnnotationValidationError is the validation error returned by
+// StatusAnnotation.Validate if the designated constraints aren't met.
+type StatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
new file mode 100644
index 000000000..97edd7690
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/versioning.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type VersioningAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"`
+}
+
+func (x *VersioningAnnotation) Reset() {
+ *x = VersioningAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VersioningAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VersioningAnnotation) ProtoMessage() {}
+
+func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead.
+func (*VersioningAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_versioning_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *VersioningAnnotation) GetPreviousMessageType() string {
+ if x != nil {
+ return x.PreviousMessageType
+ }
+ return ""
+}
+
+var file_xds_annotations_v3_versioning_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*VersioningAnnotation)(nil),
+ Field: 92389011,
+ Name: "xds.annotations.v3.versioning",
+ Tag: "bytes,92389011,opt,name=versioning",
+ Filename: "xds/annotations/v3/versioning.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.VersioningAnnotation versioning = 92389011;
+ E_Versioning = &file_xds_annotations_v3_versioning_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_versioning_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_versioning_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x93, 0xfd, 0x86, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_versioning_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_versioning_proto_rawDescData = file_xds_annotations_v3_versioning_proto_rawDesc
+)
+
+func file_xds_annotations_v3_versioning_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_versioning_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_versioning_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_versioning_proto_rawDescData
+}
+
+var file_xds_annotations_v3_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_annotations_v3_versioning_proto_goTypes = []interface{}{
+ (*VersioningAnnotation)(nil), // 0: xds.annotations.v3.VersioningAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions
+}
+var file_xds_annotations_v3_versioning_proto_depIdxs = []int32{
+ 1, // 0: xds.annotations.v3.versioning:extendee -> google.protobuf.MessageOptions
+ 0, // 1: xds.annotations.v3.versioning:type_name -> xds.annotations.v3.VersioningAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_versioning_proto_init() }
+func file_xds_annotations_v3_versioning_proto_init() {
+ if File_xds_annotations_v3_versioning_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VersioningAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_versioning_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_versioning_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_versioning_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_versioning_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_versioning_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_versioning_proto = out.File
+ file_xds_annotations_v3_versioning_proto_rawDesc = nil
+ file_xds_annotations_v3_versioning_proto_goTypes = nil
+ file_xds_annotations_v3_versioning_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
new file mode 100644
index 000000000..042c266e1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/versioning.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for PreviousMessageType
+
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
+// VersioningAnnotationValidationError is the validation error returned by
+// VersioningAnnotation.Validate if the designated constraints aren't met.
+type VersioningAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VersioningAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VersioningAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VersioningAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VersioningAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VersioningAnnotationValidationError) ErrorName() string {
+ return "VersioningAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e VersioningAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVersioningAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VersioningAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VersioningAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
new file mode 100644
index 000000000..035b8c010
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
@@ -0,0 +1,153 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/authority.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Authority struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Authority) Reset() {
+ *x = Authority{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_authority_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authority) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authority) ProtoMessage() {}
+
+func (x *Authority) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_authority_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authority.ProtoReflect.Descriptor instead.
+func (*Authority) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_authority_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Authority) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_xds_core_v3_authority_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_authority_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
+ 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_authority_proto_rawDescOnce sync.Once
+ file_xds_core_v3_authority_proto_rawDescData = file_xds_core_v3_authority_proto_rawDesc
+)
+
+func file_xds_core_v3_authority_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_authority_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_authority_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_authority_proto_rawDescData)
+ })
+ return file_xds_core_v3_authority_proto_rawDescData
+}
+
+var file_xds_core_v3_authority_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_authority_proto_goTypes = []interface{}{
+ (*Authority)(nil), // 0: xds.core.v3.Authority
+}
+var file_xds_core_v3_authority_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_authority_proto_init() }
+func file_xds_core_v3_authority_proto_init() {
+ if File_xds_core_v3_authority_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_authority_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authority); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_authority_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_authority_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_authority_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_authority_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_authority_proto = out.File
+ file_xds_core_v3_authority_proto_rawDesc = nil
+ file_xds_core_v3_authority_proto_goTypes = nil
+ file_xds_core_v3_authority_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
new file mode 100644
index 000000000..94317c2af
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
@@ -0,0 +1,146 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/authority.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Authority with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Authority) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Authority with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AuthorityMultiError, or nil
+// if none found.
+func (m *Authority) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Authority) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := AuthorityValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AuthorityMultiError(errors)
+ }
+
+ return nil
+}
+
+// AuthorityMultiError is an error wrapping multiple validation errors returned
+// by Authority.ValidateAll() if the designated constraints aren't met.
+type AuthorityMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AuthorityMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AuthorityMultiError) AllErrors() []error { return m }
+
+// AuthorityValidationError is the validation error returned by
+// Authority.Validate if the designated constraints aren't met.
+type AuthorityValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AuthorityValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AuthorityValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AuthorityValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AuthorityValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AuthorityValidationError) ErrorName() string { return "AuthorityValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AuthorityValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAuthority.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AuthorityValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AuthorityValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
new file mode 100644
index 000000000..58c27d7d3
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
@@ -0,0 +1,172 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/cidr.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CidrRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"`
+ PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"`
+}
+
+func (x *CidrRange) Reset() {
+ *x = CidrRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CidrRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CidrRange) ProtoMessage() {}
+
+func (x *CidrRange) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead.
+func (*CidrRange) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_cidr_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CidrRange) GetAddressPrefix() string {
+ if x != nil {
+ return x.AddressPrefix
+ }
+ return ""
+}
+
+func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.PrefixLen
+ }
+ return nil
+}
+
+var File_xds_core_v3_cidr_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_cidr_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69,
+ 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a,
+ 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a,
+ 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_cidr_proto_rawDescOnce sync.Once
+ file_xds_core_v3_cidr_proto_rawDescData = file_xds_core_v3_cidr_proto_rawDesc
+)
+
+func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_cidr_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_cidr_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_cidr_proto_rawDescData)
+ })
+ return file_xds_core_v3_cidr_proto_rawDescData
+}
+
+var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_cidr_proto_goTypes = []interface{}{
+ (*CidrRange)(nil), // 0: xds.core.v3.CidrRange
+ (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value
+}
+var file_xds_core_v3_cidr_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_cidr_proto_init() }
+func file_xds_core_v3_cidr_proto_init() {
+ if File_xds_core_v3_cidr_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_cidr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CidrRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_cidr_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_cidr_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_cidr_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_cidr_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_cidr_proto = out.File
+ file_xds_core_v3_cidr_proto_rawDesc = nil
+ file_xds_core_v3_cidr_proto_goTypes = nil
+ file_xds_core_v3_cidr_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
new file mode 100644
index 000000000..43327f56b
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/cidr.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CidrRange with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CidrRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CidrRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CidrRangeMultiError, or nil
+// if none found.
+func (m *CidrRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CidrRange) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
+ err := CidrRangeValidationError{
+ field: "AddressPrefix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if wrapper := m.GetPrefixLen(); wrapper != nil {
+
+ if wrapper.GetValue() > 128 {
+ err := CidrRangeValidationError{
+ field: "PrefixLen",
+ reason: "value must be less than or equal to 128",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CidrRangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// CidrRangeMultiError is an error wrapping multiple validation errors returned
+// by CidrRange.ValidateAll() if the designated constraints aren't met.
+type CidrRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CidrRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CidrRangeMultiError) AllErrors() []error { return m }
+
+// CidrRangeValidationError is the validation error returned by
+// CidrRange.Validate if the designated constraints aren't met.
+type CidrRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CidrRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CidrRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CidrRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CidrRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CidrRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCidrRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CidrRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CidrRangeValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
new file mode 100644
index 000000000..f0b4c12f2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
@@ -0,0 +1,297 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/collection_entry.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CollectionEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ResourceSpecifier:
+ //
+ // *CollectionEntry_Locator
+ // *CollectionEntry_InlineEntry_
+ ResourceSpecifier isCollectionEntry_ResourceSpecifier `protobuf_oneof:"resource_specifier"`
+}
+
+func (x *CollectionEntry) Reset() {
+ *x = CollectionEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionEntry) ProtoMessage() {}
+
+func (x *CollectionEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionEntry.ProtoReflect.Descriptor instead.
+func (*CollectionEntry) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *CollectionEntry) GetResourceSpecifier() isCollectionEntry_ResourceSpecifier {
+ if m != nil {
+ return m.ResourceSpecifier
+ }
+ return nil
+}
+
+func (x *CollectionEntry) GetLocator() *ResourceLocator {
+ if x, ok := x.GetResourceSpecifier().(*CollectionEntry_Locator); ok {
+ return x.Locator
+ }
+ return nil
+}
+
+func (x *CollectionEntry) GetInlineEntry() *CollectionEntry_InlineEntry {
+ if x, ok := x.GetResourceSpecifier().(*CollectionEntry_InlineEntry_); ok {
+ return x.InlineEntry
+ }
+ return nil
+}
+
+type isCollectionEntry_ResourceSpecifier interface {
+ isCollectionEntry_ResourceSpecifier()
+}
+
+type CollectionEntry_Locator struct {
+ Locator *ResourceLocator `protobuf:"bytes,1,opt,name=locator,proto3,oneof"`
+}
+
+type CollectionEntry_InlineEntry_ struct {
+ InlineEntry *CollectionEntry_InlineEntry `protobuf:"bytes,2,opt,name=inline_entry,json=inlineEntry,proto3,oneof"`
+}
+
+func (*CollectionEntry_Locator) isCollectionEntry_ResourceSpecifier() {}
+
+func (*CollectionEntry_InlineEntry_) isCollectionEntry_ResourceSpecifier() {}
+
+type CollectionEntry_InlineEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *CollectionEntry_InlineEntry) Reset() {
+ *x = CollectionEntry_InlineEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionEntry_InlineEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionEntry_InlineEntry) ProtoMessage() {}
+
+func (x *CollectionEntry_InlineEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionEntry_InlineEntry.ProtoReflect.Descriptor instead.
+func (*CollectionEntry_InlineEntry) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *CollectionEntry_InlineEntry) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CollectionEntry_InlineEntry) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+var File_xds_core_v3_collection_entry_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64,
+ 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78,
+ 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x02, 0x0a, 0x0f, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38,
+ 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52,
+ 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x8b, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x32, 0x15, 0x5e, 0x5b,
+ 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x7e, 0x3a,
+ 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_collection_entry_proto_rawDescOnce sync.Once
+ file_xds_core_v3_collection_entry_proto_rawDescData = file_xds_core_v3_collection_entry_proto_rawDesc
+)
+
+func file_xds_core_v3_collection_entry_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_collection_entry_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_collection_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_collection_entry_proto_rawDescData)
+ })
+ return file_xds_core_v3_collection_entry_proto_rawDescData
+}
+
+var file_xds_core_v3_collection_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{
+ (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry
+ (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry
+ (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator
+ (*anypb.Any)(nil), // 3: google.protobuf.Any
+}
+var file_xds_core_v3_collection_entry_proto_depIdxs = []int32{
+ 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator
+ 1, // 1: xds.core.v3.CollectionEntry.inline_entry:type_name -> xds.core.v3.CollectionEntry.InlineEntry
+ 3, // 2: xds.core.v3.CollectionEntry.InlineEntry.resource:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_collection_entry_proto_init() }
+func file_xds_core_v3_collection_entry_proto_init() {
+ if File_xds_core_v3_collection_entry_proto != nil {
+ return
+ }
+ file_xds_core_v3_resource_locator_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_collection_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_core_v3_collection_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionEntry_InlineEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_core_v3_collection_entry_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*CollectionEntry_Locator)(nil),
+ (*CollectionEntry_InlineEntry_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_collection_entry_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_collection_entry_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_collection_entry_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_collection_entry_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_collection_entry_proto = out.File
+ file_xds_core_v3_collection_entry_proto_rawDesc = nil
+ file_xds_core_v3_collection_entry_proto_goTypes = nil
+ file_xds_core_v3_collection_entry_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
new file mode 100644
index 000000000..610990b7f
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
@@ -0,0 +1,383 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/collection_entry.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CollectionEntry with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CollectionEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntryMultiError, or nil if none found.
+func (m *CollectionEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofResourceSpecifierPresent := false
+ switch v := m.ResourceSpecifier.(type) {
+ case *CollectionEntry_Locator:
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLocator()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *CollectionEntry_InlineEntry_:
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetInlineEntry()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofResourceSpecifierPresent {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return CollectionEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// CollectionEntryMultiError is an error wrapping multiple validation errors
+// returned by CollectionEntry.ValidateAll() if the designated constraints
+// aren't met.
+type CollectionEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntryMultiError) AllErrors() []error { return m }
+
+// CollectionEntryValidationError is the validation error returned by
+// CollectionEntry.Validate if the designated constraints aren't met.
+type CollectionEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntryValidationError) ErrorName() string { return "CollectionEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CollectionEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntryValidationError{}
+
+// Validate checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CollectionEntry_InlineEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntry_InlineEntryMultiError, or nil if none found.
+func (m *CollectionEntry_InlineEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry_InlineEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) {
+ err := CollectionEntry_InlineEntryValidationError{
+ field: "Name",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Version
+
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CollectionEntry_InlineEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// CollectionEntry_InlineEntryMultiError is an error wrapping multiple
+// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if
+// the designated constraints aren't met.
+type CollectionEntry_InlineEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntry_InlineEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m }
+
+// CollectionEntry_InlineEntryValidationError is the validation error returned
+// by CollectionEntry_InlineEntry.Validate if the designated constraints
+// aren't met.
+type CollectionEntry_InlineEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntry_InlineEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntry_InlineEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntry_InlineEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntry_InlineEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntry_InlineEntryValidationError) ErrorName() string {
+ return "CollectionEntry_InlineEntryValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CollectionEntry_InlineEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry_InlineEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntry_InlineEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntry_InlineEntryValidationError{}
+
+var _CollectionEntry_InlineEntry_Name_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\.~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
new file mode 100644
index 000000000..3e75637ea
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/context_params.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ContextParams struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ContextParams) Reset() {
+ *x = ContextParams{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_context_params_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ContextParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ContextParams) ProtoMessage() {}
+
+func (x *ContextParams) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_context_params_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ContextParams.ProtoReflect.Descriptor instead.
+func (*ContextParams) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_context_params_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ContextParams) GetParams() map[string]string {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+var File_xds_core_v3_context_params_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_context_params_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x8a, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x12, 0x3e, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_xds_core_v3_context_params_proto_rawDescOnce sync.Once
+ file_xds_core_v3_context_params_proto_rawDescData = file_xds_core_v3_context_params_proto_rawDesc
+)
+
+func file_xds_core_v3_context_params_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_context_params_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_context_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_context_params_proto_rawDescData)
+ })
+ return file_xds_core_v3_context_params_proto_rawDescData
+}
+
+var file_xds_core_v3_context_params_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_context_params_proto_goTypes = []interface{}{
+ (*ContextParams)(nil), // 0: xds.core.v3.ContextParams
+ nil, // 1: xds.core.v3.ContextParams.ParamsEntry
+}
+var file_xds_core_v3_context_params_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.ContextParams.params:type_name -> xds.core.v3.ContextParams.ParamsEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_context_params_proto_init() }
+func file_xds_core_v3_context_params_proto_init() {
+ if File_xds_core_v3_context_params_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_context_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ContextParams); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_context_params_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_context_params_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_context_params_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_context_params_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_context_params_proto = out.File
+ file_xds_core_v3_context_params_proto_rawDesc = nil
+ file_xds_core_v3_context_params_proto_goTypes = nil
+ file_xds_core_v3_context_params_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
new file mode 100644
index 000000000..1c9accaa3
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
@@ -0,0 +1,138 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/context_params.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ContextParams with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ContextParams) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ContextParams with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ContextParamsMultiError, or
+// nil if none found.
+func (m *ContextParams) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ContextParams) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Params
+
+ if len(errors) > 0 {
+ return ContextParamsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ContextParamsMultiError is an error wrapping multiple validation errors
+// returned by ContextParams.ValidateAll() if the designated constraints
+// aren't met.
+type ContextParamsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ContextParamsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ContextParamsMultiError) AllErrors() []error { return m }
+
+// ContextParamsValidationError is the validation error returned by
+// ContextParams.Validate if the designated constraints aren't met.
+type ContextParamsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ContextParamsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ContextParamsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ContextParamsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ContextParamsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ContextParamsValidationError) ErrorName() string { return "ContextParamsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ContextParamsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sContextParams.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ContextParamsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ContextParamsValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
new file mode 100644
index 000000000..7183e1143
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/extension.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedExtensionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
+}
+
+func (x *TypedExtensionConfig) Reset() {
+ *x = TypedExtensionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedExtensionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedExtensionConfig) ProtoMessage() {}
+
+func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_extension_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead.
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_extension_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedExtensionConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any {
+ if x != nil {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+var File_xds_core_v3_extension_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_extension_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76,
+ 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x4e, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_extension_proto_rawDescOnce sync.Once
+ file_xds_core_v3_extension_proto_rawDescData = file_xds_core_v3_extension_proto_rawDesc
+)
+
+func file_xds_core_v3_extension_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_extension_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_extension_proto_rawDescData)
+ })
+ return file_xds_core_v3_extension_proto_rawDescData
+}
+
+var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_extension_proto_goTypes = []interface{}{
+ (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig
+ (*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_xds_core_v3_extension_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_extension_proto_init() }
+func file_xds_core_v3_extension_proto_init() {
+ if File_xds_core_v3_extension_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedExtensionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_extension_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_extension_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_extension_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_extension_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_extension_proto = out.File
+ file_xds_core_v3_extension_proto_rawDesc = nil
+ file_xds_core_v3_extension_proto_goTypes = nil
+ file_xds_core_v3_extension_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
new file mode 100644
index 000000000..839f3fef7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/extension.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *TypedExtensionConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TypedExtensionConfigMultiError, or nil if none found.
+func (m *TypedExtensionConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedExtensionConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := TypedExtensionConfigValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTypedConfig() == nil {
+ err := TypedExtensionConfigValidationError{
+ field: "TypedConfig",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if a := m.GetTypedConfig(); a != nil {
+
+ }
+
+ if len(errors) > 0 {
+ return TypedExtensionConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedExtensionConfigMultiError is an error wrapping multiple validation
+// errors returned by TypedExtensionConfig.ValidateAll() if the designated
+// constraints aren't met.
+type TypedExtensionConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedExtensionConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedExtensionConfigMultiError) AllErrors() []error { return m }
+
+// TypedExtensionConfigValidationError is the validation error returned by
+// TypedExtensionConfig.Validate if the designated constraints aren't met.
+type TypedExtensionConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedExtensionConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedExtensionConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedExtensionConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedExtensionConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedExtensionConfigValidationError) ErrorName() string {
+ return "TypedExtensionConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TypedExtensionConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedExtensionConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedExtensionConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedExtensionConfigValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
new file mode 100644
index 000000000..ced3bc3f4
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetName() *ResourceName {
+ if x != nil {
+ return x.Name
+ }
+ return nil
+}
+
+func (x *Resource) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *Resource) GetResource() *anypb.Any {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+var File_xds_core_v3_resource_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_proto_rawDescData = file_xds_core_v3_resource_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_resource_proto_goTypes = []interface{}{
+ (*Resource)(nil), // 0: xds.core.v3.Resource
+ (*ResourceName)(nil), // 1: xds.core.v3.ResourceName
+ (*anypb.Any)(nil), // 2: google.protobuf.Any
+}
+var file_xds_core_v3_resource_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName
+ 2, // 1: xds.core.v3.Resource.resource:type_name -> google.protobuf.Any
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_proto_init() }
+func file_xds_core_v3_resource_proto_init() {
+ if File_xds_core_v3_resource_proto != nil {
+ return
+ }
+ file_xds_core_v3_resource_name_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_resource_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_proto = out.File
+ file_xds_core_v3_resource_proto_rawDesc = nil
+ file_xds_core_v3_resource_proto_goTypes = nil
+ file_xds_core_v3_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
new file mode 100644
index 000000000..dc972171c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Resource with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Resource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Resource with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceMultiError, or nil
+// if none found.
+func (m *Resource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Resource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetName()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Version
+
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ResourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceMultiError is an error wrapping multiple validation errors returned
+// by Resource.ValidateAll() if the designated constraints aren't met.
+type ResourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceMultiError) AllErrors() []error { return m }
+
+// ResourceValidationError is the validation error returned by
+// Resource.Validate if the designated constraints aren't met.
+type ResourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
new file mode 100644
index 000000000..f469c18cf
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
@@ -0,0 +1,406 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource_locator.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceLocator_Scheme int32
+
+const (
+ ResourceLocator_XDSTP ResourceLocator_Scheme = 0
+ ResourceLocator_HTTP ResourceLocator_Scheme = 1
+ ResourceLocator_FILE ResourceLocator_Scheme = 2
+)
+
+// Enum value maps for ResourceLocator_Scheme.
+var (
+ ResourceLocator_Scheme_name = map[int32]string{
+ 0: "XDSTP",
+ 1: "HTTP",
+ 2: "FILE",
+ }
+ ResourceLocator_Scheme_value = map[string]int32{
+ "XDSTP": 0,
+ "HTTP": 1,
+ "FILE": 2,
+ }
+)
+
+func (x ResourceLocator_Scheme) Enum() *ResourceLocator_Scheme {
+ p := new(ResourceLocator_Scheme)
+ *p = x
+ return p
+}
+
+func (x ResourceLocator_Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ResourceLocator_Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_xds_core_v3_resource_locator_proto_enumTypes[0].Descriptor()
+}
+
+func (ResourceLocator_Scheme) Type() protoreflect.EnumType {
+ return &file_xds_core_v3_resource_locator_proto_enumTypes[0]
+}
+
+func (x ResourceLocator_Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceLocator_Scheme.Descriptor instead.
+func (ResourceLocator_Scheme) EnumDescriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type ResourceLocator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Scheme ResourceLocator_Scheme `protobuf:"varint,1,opt,name=scheme,proto3,enum=xds.core.v3.ResourceLocator_Scheme" json:"scheme,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ // Types that are assignable to ContextParamSpecifier:
+ //
+ // *ResourceLocator_ExactContext
+ ContextParamSpecifier isResourceLocator_ContextParamSpecifier `protobuf_oneof:"context_param_specifier"`
+ Directives []*ResourceLocator_Directive `protobuf:"bytes,6,rep,name=directives,proto3" json:"directives,omitempty"`
+}
+
+func (x *ResourceLocator) Reset() {
+ *x = ResourceLocator{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceLocator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceLocator) ProtoMessage() {}
+
+func (x *ResourceLocator) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead.
+func (*ResourceLocator) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceLocator) GetScheme() ResourceLocator_Scheme {
+ if x != nil {
+ return x.Scheme
+ }
+ return ResourceLocator_XDSTP
+}
+
+func (x *ResourceLocator) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *ResourceLocator) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *ResourceLocator) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (m *ResourceLocator) GetContextParamSpecifier() isResourceLocator_ContextParamSpecifier {
+ if m != nil {
+ return m.ContextParamSpecifier
+ }
+ return nil
+}
+
+func (x *ResourceLocator) GetExactContext() *ContextParams {
+ if x, ok := x.GetContextParamSpecifier().(*ResourceLocator_ExactContext); ok {
+ return x.ExactContext
+ }
+ return nil
+}
+
+func (x *ResourceLocator) GetDirectives() []*ResourceLocator_Directive {
+ if x != nil {
+ return x.Directives
+ }
+ return nil
+}
+
+type isResourceLocator_ContextParamSpecifier interface {
+ isResourceLocator_ContextParamSpecifier()
+}
+
+type ResourceLocator_ExactContext struct {
+ ExactContext *ContextParams `protobuf:"bytes,5,opt,name=exact_context,json=exactContext,proto3,oneof"`
+}
+
+func (*ResourceLocator_ExactContext) isResourceLocator_ContextParamSpecifier() {}
+
+type ResourceLocator_Directive struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Directive:
+ //
+ // *ResourceLocator_Directive_Alt
+ // *ResourceLocator_Directive_Entry
+ Directive isResourceLocator_Directive_Directive `protobuf_oneof:"directive"`
+}
+
+func (x *ResourceLocator_Directive) Reset() {
+ *x = ResourceLocator_Directive{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceLocator_Directive) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceLocator_Directive) ProtoMessage() {}
+
+func (x *ResourceLocator_Directive) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceLocator_Directive.ProtoReflect.Descriptor instead.
+func (*ResourceLocator_Directive) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *ResourceLocator_Directive) GetDirective() isResourceLocator_Directive_Directive {
+ if m != nil {
+ return m.Directive
+ }
+ return nil
+}
+
+func (x *ResourceLocator_Directive) GetAlt() *ResourceLocator {
+ if x, ok := x.GetDirective().(*ResourceLocator_Directive_Alt); ok {
+ return x.Alt
+ }
+ return nil
+}
+
+func (x *ResourceLocator_Directive) GetEntry() string {
+ if x, ok := x.GetDirective().(*ResourceLocator_Directive_Entry); ok {
+ return x.Entry
+ }
+ return ""
+}
+
+type isResourceLocator_Directive_Directive interface {
+ isResourceLocator_Directive_Directive()
+}
+
+type ResourceLocator_Directive_Alt struct {
+ Alt *ResourceLocator `protobuf:"bytes,1,opt,name=alt,proto3,oneof"`
+}
+
+type ResourceLocator_Directive_Entry struct {
+ Entry string `protobuf:"bytes,2,opt,name=entry,proto3,oneof"`
+}
+
+func (*ResourceLocator_Directive_Alt) isResourceLocator_Directive_Directive() {}
+
+func (*ResourceLocator_Directive_Entry) isResourceLocator_Directive_Directive() {}
+
+var File_xds_core_v3_resource_locator_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x04,
+ 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x12, 0x45, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x23, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01,
+ 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
+ 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74,
+ 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x52, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x1a,
+ 0x88, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x30, 0x0a,
+ 0x03, 0x61, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6c, 0x74, 0x12,
+ 0x37, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1f,
+ 0xfa, 0x42, 0x1c, 0x72, 0x1a, 0x10, 0x01, 0x32, 0x16, 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d,
+ 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x2f, 0x7e, 0x3a, 0x5d, 0x2b, 0x24, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x27, 0x0a, 0x06, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x58, 0x44, 0x53, 0x54, 0x50, 0x10, 0x00, 0x12,
+ 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c,
+ 0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_locator_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_locator_proto_rawDescData = file_xds_core_v3_resource_locator_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_locator_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_locator_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_locator_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_locator_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_locator_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_locator_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_xds_core_v3_resource_locator_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_resource_locator_proto_goTypes = []interface{}{
+ (ResourceLocator_Scheme)(0), // 0: xds.core.v3.ResourceLocator.Scheme
+ (*ResourceLocator)(nil), // 1: xds.core.v3.ResourceLocator
+ (*ResourceLocator_Directive)(nil), // 2: xds.core.v3.ResourceLocator.Directive
+ (*ContextParams)(nil), // 3: xds.core.v3.ContextParams
+}
+var file_xds_core_v3_resource_locator_proto_depIdxs = []int32{
+ 0, // 0: xds.core.v3.ResourceLocator.scheme:type_name -> xds.core.v3.ResourceLocator.Scheme
+ 3, // 1: xds.core.v3.ResourceLocator.exact_context:type_name -> xds.core.v3.ContextParams
+ 2, // 2: xds.core.v3.ResourceLocator.directives:type_name -> xds.core.v3.ResourceLocator.Directive
+ 1, // 3: xds.core.v3.ResourceLocator.Directive.alt:type_name -> xds.core.v3.ResourceLocator
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_locator_proto_init() }
+func file_xds_core_v3_resource_locator_proto_init() {
+ if File_xds_core_v3_resource_locator_proto != nil {
+ return
+ }
+ file_xds_core_v3_context_params_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_locator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceLocator); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceLocator_Directive); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*ResourceLocator_ExactContext)(nil),
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ResourceLocator_Directive_Alt)(nil),
+ (*ResourceLocator_Directive_Entry)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_locator_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_locator_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_locator_proto_depIdxs,
+ EnumInfos: file_xds_core_v3_resource_locator_proto_enumTypes,
+ MessageInfos: file_xds_core_v3_resource_locator_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_locator_proto = out.File
+ file_xds_core_v3_resource_locator_proto_rawDesc = nil
+ file_xds_core_v3_resource_locator_proto_goTypes = nil
+ file_xds_core_v3_resource_locator_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
new file mode 100644
index 000000000..1686e98d1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
@@ -0,0 +1,439 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_locator.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceLocator with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ResourceLocator) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocatorMultiError, or nil if none found.
+func (m *ResourceLocator) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok {
+ err := ResourceLocatorValidationError{
+ field: "Scheme",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ err := ResourceLocatorValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetDirectives() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ switch v := m.ContextParamSpecifier.(type) {
+ case *ResourceLocator_ExactContext:
+ if v == nil {
+ err := ResourceLocatorValidationError{
+ field: "ContextParamSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExactContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return ResourceLocatorMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceLocatorMultiError is an error wrapping multiple validation errors
+// returned by ResourceLocator.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceLocatorMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocatorMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocatorMultiError) AllErrors() []error { return m }
+
+// ResourceLocatorValidationError is the validation error returned by
+// ResourceLocator.Validate if the designated constraints aren't met.
+type ResourceLocatorValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocatorValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocatorValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocatorValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocatorValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceLocatorValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocatorValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocatorValidationError{}
+
+// Validate checks the field values on ResourceLocator_Directive with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResourceLocator_Directive) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator_Directive with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocator_DirectiveMultiError, or nil if none found.
+func (m *ResourceLocator_Directive) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator_Directive) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofDirectivePresent := false
+ switch v := m.Directive.(type) {
+ case *ResourceLocator_Directive_Alt:
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
+
+ if all {
+ switch v := interface{}(m.GetAlt()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ResourceLocator_Directive_Entry:
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
+
+ if utf8.RuneCountInString(m.GetEntry()) < 1 {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofDirectivePresent {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return ResourceLocator_DirectiveMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation
+// errors returned by ResourceLocator_Directive.ValidateAll() if the
+// designated constraints aren't met.
+type ResourceLocator_DirectiveMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocator_DirectiveMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m }
+
+// ResourceLocator_DirectiveValidationError is the validation error returned by
+// ResourceLocator_Directive.Validate if the designated constraints aren't met.
+type ResourceLocator_DirectiveValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocator_DirectiveValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocator_DirectiveValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocator_DirectiveValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocator_DirectiveValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocator_DirectiveValidationError) ErrorName() string {
+ return "ResourceLocator_DirectiveValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceLocator_DirectiveValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator_Directive.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocator_DirectiveValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocator_DirectiveValidationError{}
+
+var _ResourceLocator_Directive_Entry_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\./~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
new file mode 100644
index 000000000..65f65fdbd
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
@@ -0,0 +1,190 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource_name.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceName struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,3,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ Context *ContextParams `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"`
+}
+
+func (x *ResourceName) Reset() {
+ *x = ResourceName{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_name_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceName) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceName) ProtoMessage() {}
+
+func (x *ResourceName) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_name_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead.
+func (*ResourceName) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_name_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceName) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *ResourceName) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *ResourceName) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (x *ResourceName) GetContext() *ContextParams {
+ if x != nil {
+ return x.Context
+ }
+ return nil
+}
+
+var File_xds_core_v3_resource_name_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_name_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f,
+ 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_name_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_name_proto_rawDescData = file_xds_core_v3_resource_name_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_name_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_name_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_name_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_name_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_name_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_name_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_resource_name_proto_goTypes = []interface{}{
+ (*ResourceName)(nil), // 0: xds.core.v3.ResourceName
+ (*ContextParams)(nil), // 1: xds.core.v3.ContextParams
+}
+var file_xds_core_v3_resource_name_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.ResourceName.context:type_name -> xds.core.v3.ContextParams
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_name_proto_init() }
+func file_xds_core_v3_resource_name_proto_init() {
+ if File_xds_core_v3_resource_name_proto != nil {
+ return
+ }
+ file_xds_core_v3_context_params_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_name_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceName); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_name_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_name_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_name_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_resource_name_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_name_proto = out.File
+ file_xds_core_v3_resource_name_proto_rawDesc = nil
+ file_xds_core_v3_resource_name_proto_goTypes = nil
+ file_xds_core_v3_resource_name_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
new file mode 100644
index 000000000..270e921bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_name.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceName with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ResourceName) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceName with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceNameMultiError, or
+// nil if none found.
+func (m *ResourceName) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceName) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ err := ResourceNameValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ResourceNameMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceNameMultiError is an error wrapping multiple validation errors
+// returned by ResourceName.ValidateAll() if the designated constraints aren't met.
+type ResourceNameMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceNameMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceNameMultiError) AllErrors() []error { return m }
+
+// ResourceNameValidationError is the validation error returned by
+// ResourceName.Validate if the designated constraints aren't met.
+type ResourceNameValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceNameValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceNameValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceNameValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceNameValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceNameValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceName.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceNameValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceNameValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go
new file mode 100644
index 000000000..f929ca637
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go
@@ -0,0 +1,272 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/data/orca/v3/orca_load_report.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type OrcaLoadReport struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"`
+ MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"`
+ // Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto.
+ Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"`
+ RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"`
+ Eps float64 `protobuf:"fixed64,7,opt,name=eps,proto3" json:"eps,omitempty"`
+ NamedMetrics map[string]float64 `protobuf:"bytes,8,rep,name=named_metrics,json=namedMetrics,proto3" json:"named_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ ApplicationUtilization float64 `protobuf:"fixed64,9,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"`
+}
+
+func (x *OrcaLoadReport) Reset() {
+ *x = OrcaLoadReport{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrcaLoadReport) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrcaLoadReport) ProtoMessage() {}
+
+func (x *OrcaLoadReport) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrcaLoadReport.ProtoReflect.Descriptor instead.
+func (*OrcaLoadReport) Descriptor() ([]byte, []int) {
+ return file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OrcaLoadReport) GetCpuUtilization() float64 {
+ if x != nil {
+ return x.CpuUtilization
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetMemUtilization() float64 {
+ if x != nil {
+ return x.MemUtilization
+ }
+ return 0
+}
+
+// Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto.
+func (x *OrcaLoadReport) GetRps() uint64 {
+ if x != nil {
+ return x.Rps
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetRequestCost() map[string]float64 {
+ if x != nil {
+ return x.RequestCost
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetUtilization() map[string]float64 {
+ if x != nil {
+ return x.Utilization
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetRpsFractional() float64 {
+ if x != nil {
+ return x.RpsFractional
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetEps() float64 {
+ if x != nil {
+ return x.Eps
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetNamedMetrics() map[string]float64 {
+ if x != nil {
+ return x.NamedMetrics
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetApplicationUtilization() float64 {
+ if x != nil {
+ return x.ApplicationUtilization
+ }
+ return 0
+}
+
+var File_xds_data_orca_v3_orca_load_report_proto protoreflect.FileDescriptor
+
+var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f,
+ 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61,
+ 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75,
+ 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12,
+ 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42,
+ 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
+ 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x71,
+ 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f,
+ 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52,
+ 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x9a, 0x01, 0x16, 0x2a, 0x14,
+ 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09,
+ 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63,
+ 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10,
+ 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11,
+ 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a,
+ 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72,
+ 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce sync.Once
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescData = file_xds_data_orca_v3_orca_load_report_proto_rawDesc
+)
+
+func file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP() []byte {
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce.Do(func() {
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_data_orca_v3_orca_load_report_proto_rawDescData)
+ })
+ return file_xds_data_orca_v3_orca_load_report_proto_rawDescData
+}
+
+var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_xds_data_orca_v3_orca_load_report_proto_goTypes = []interface{}{
+ (*OrcaLoadReport)(nil), // 0: xds.data.orca.v3.OrcaLoadReport
+ nil, // 1: xds.data.orca.v3.OrcaLoadReport.RequestCostEntry
+ nil, // 2: xds.data.orca.v3.OrcaLoadReport.UtilizationEntry
+ nil, // 3: xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry
+}
+var file_xds_data_orca_v3_orca_load_report_proto_depIdxs = []int32{
+ 1, // 0: xds.data.orca.v3.OrcaLoadReport.request_cost:type_name -> xds.data.orca.v3.OrcaLoadReport.RequestCostEntry
+ 2, // 1: xds.data.orca.v3.OrcaLoadReport.utilization:type_name -> xds.data.orca.v3.OrcaLoadReport.UtilizationEntry
+ 3, // 2: xds.data.orca.v3.OrcaLoadReport.named_metrics:type_name -> xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_data_orca_v3_orca_load_report_proto_init() }
+func file_xds_data_orca_v3_orca_load_report_proto_init() {
+ if File_xds_data_orca_v3_orca_load_report_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrcaLoadReport); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_data_orca_v3_orca_load_report_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_data_orca_v3_orca_load_report_proto_goTypes,
+ DependencyIndexes: file_xds_data_orca_v3_orca_load_report_proto_depIdxs,
+ MessageInfos: file_xds_data_orca_v3_orca_load_report_proto_msgTypes,
+ }.Build()
+ File_xds_data_orca_v3_orca_load_report_proto = out.File
+ file_xds_data_orca_v3_orca_load_report_proto_rawDesc = nil
+ file_xds_data_orca_v3_orca_load_report_proto_goTypes = nil
+ file_xds_data_orca_v3_orca_load_report_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go
new file mode 100644
index 000000000..8dd55330a
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go
@@ -0,0 +1,225 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/data/orca/v3/orca_load_report.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on OrcaLoadReport with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *OrcaLoadReport) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrcaLoadReport with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in OrcaLoadReportMultiError,
+// or nil if none found.
+func (m *OrcaLoadReport) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrcaLoadReport) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetCpuUtilization() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "CpuUtilization",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if val := m.GetMemUtilization(); val < 0 || val > 1 {
+ err := OrcaLoadReportValidationError{
+ field: "MemUtilization",
+ reason: "value must be inside range [0, 1]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Rps
+
+ // no validation rules for RequestCost
+
+ {
+ sorted_keys := make([]string, len(m.GetUtilization()))
+ i := 0
+ for key := range m.GetUtilization() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetUtilization()[key]
+ _ = val
+
+ // no validation rules for Utilization[key]
+
+ if val := val; val < 0 || val > 1 {
+ err := OrcaLoadReportValidationError{
+ field: fmt.Sprintf("Utilization[%v]", key),
+ reason: "value must be inside range [0, 1]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if m.GetRpsFractional() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "RpsFractional",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetEps() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "Eps",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for NamedMetrics
+
+ if m.GetApplicationUtilization() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "ApplicationUtilization",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return OrcaLoadReportMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrcaLoadReportMultiError is an error wrapping multiple validation errors
+// returned by OrcaLoadReport.ValidateAll() if the designated constraints
+// aren't met.
+type OrcaLoadReportMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrcaLoadReportMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrcaLoadReportMultiError) AllErrors() []error { return m }
+
+// OrcaLoadReportValidationError is the validation error returned by
+// OrcaLoadReport.Validate if the designated constraints aren't met.
+type OrcaLoadReportValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrcaLoadReportValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrcaLoadReportValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrcaLoadReportValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrcaLoadReportValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrcaLoadReportValidationError) ErrorName() string { return "OrcaLoadReportValidationError" }
+
+// Error satisfies the builtin error interface
+func (e OrcaLoadReportValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrcaLoadReport.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrcaLoadReportValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrcaLoadReportValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go
new file mode 100644
index 000000000..32e4a37bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/data/orca/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type OrcaLoadReportRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"`
+ RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"`
+}
+
+func (x *OrcaLoadReportRequest) Reset() {
+ *x = OrcaLoadReportRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrcaLoadReportRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrcaLoadReportRequest) ProtoMessage() {}
+
+func (x *OrcaLoadReportRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrcaLoadReportRequest.ProtoReflect.Descriptor instead.
+func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) {
+ return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration {
+ if x != nil {
+ return x.ReportInterval
+ }
+ return nil
+}
+
+func (x *OrcaLoadReportRequest) GetRequestCostNames() []string {
+ if x != nil {
+ return x.RequestCostNames
+ }
+ return nil
+}
+
+var File_xds_service_orca_v3_orca_proto protoreflect.FileDescriptor
+
+var file_xds_service_orca_v3_orca_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72,
+ 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72,
+ 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f,
+ 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89,
+ 0x01, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65,
+ 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x43, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x75, 0x0a, 0x0e, 0x4f, 0x70,
+ 0x65, 0x6e, 0x52, 0x63, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x11,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x72, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x12, 0x2a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64,
+ 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x30,
+ 0x01, 0x42, 0x59, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, 0x63, 0x61,
+ 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4f, 0x72, 0x63, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_service_orca_v3_orca_proto_rawDescOnce sync.Once
+ file_xds_service_orca_v3_orca_proto_rawDescData = file_xds_service_orca_v3_orca_proto_rawDesc
+)
+
+func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte {
+ file_xds_service_orca_v3_orca_proto_rawDescOnce.Do(func() {
+ file_xds_service_orca_v3_orca_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_service_orca_v3_orca_proto_rawDescData)
+ })
+ return file_xds_service_orca_v3_orca_proto_rawDescData
+}
+
+var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{
+ (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest
+ (*durationpb.Duration)(nil), // 1: google.protobuf.Duration
+ (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport
+}
+var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{
+ 1, // 0: xds.service.orca.v3.OrcaLoadReportRequest.report_interval:type_name -> google.protobuf.Duration
+ 0, // 1: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:input_type -> xds.service.orca.v3.OrcaLoadReportRequest
+ 2, // 2: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:output_type -> xds.data.orca.v3.OrcaLoadReport
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_service_orca_v3_orca_proto_init() }
+func file_xds_service_orca_v3_orca_proto_init() {
+ if File_xds_service_orca_v3_orca_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_service_orca_v3_orca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrcaLoadReportRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_service_orca_v3_orca_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_xds_service_orca_v3_orca_proto_goTypes,
+ DependencyIndexes: file_xds_service_orca_v3_orca_proto_depIdxs,
+ MessageInfos: file_xds_service_orca_v3_orca_proto_msgTypes,
+ }.Build()
+ File_xds_service_orca_v3_orca_proto = out.File
+ file_xds_service_orca_v3_orca_proto_rawDesc = nil
+ file_xds_service_orca_v3_orca_proto_goTypes = nil
+ file_xds_service_orca_v3_orca_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go
new file mode 100644
index 000000000..8949e8372
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on OrcaLoadReportRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *OrcaLoadReportRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrcaLoadReportRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// OrcaLoadReportRequestMultiError, or nil if none found.
+func (m *OrcaLoadReportRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrcaLoadReportRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetReportInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return OrcaLoadReportRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrcaLoadReportRequestMultiError is an error wrapping multiple validation
+// errors returned by OrcaLoadReportRequest.ValidateAll() if the designated
+// constraints aren't met.
+type OrcaLoadReportRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrcaLoadReportRequestMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrcaLoadReportRequestMultiError) AllErrors() []error { return m }
+
+// OrcaLoadReportRequestValidationError is the validation error returned by
+// OrcaLoadReportRequest.Validate if the designated constraints aren't met.
+type OrcaLoadReportRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrcaLoadReportRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrcaLoadReportRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrcaLoadReportRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrcaLoadReportRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrcaLoadReportRequestValidationError) ErrorName() string {
+ return "OrcaLoadReportRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e OrcaLoadReportRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrcaLoadReportRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrcaLoadReportRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrcaLoadReportRequestValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go
new file mode 100644
index 000000000..8a92439e0
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go
@@ -0,0 +1,135 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v5.29.1
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ context "context"
+ v3 "github.com/cncf/xds/go/xds/data/orca/v3"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics"
+)
+
+// OpenRcaServiceClient is the client API for OpenRcaService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type OpenRcaServiceClient interface {
+ StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error)
+}
+
+type openRcaServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient {
+ return &openRcaServiceClient{cc}
+}
+
+func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &openRcaServiceStreamCoreMetricsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type OpenRcaService_StreamCoreMetricsClient interface {
+ Recv() (*v3.OrcaLoadReport, error)
+ grpc.ClientStream
+}
+
+type openRcaServiceStreamCoreMetricsClient struct {
+ grpc.ClientStream
+}
+
+func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) {
+ m := new(v3.OrcaLoadReport)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// OpenRcaServiceServer is the server API for OpenRcaService service.
+// All implementations should embed UnimplementedOpenRcaServiceServer
+// for forward compatibility
+type OpenRcaServiceServer interface {
+ StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error
+}
+
+// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations.
+type UnimplementedOpenRcaServiceServer struct {
+}
+
+func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented")
+}
+
+// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will
+// result in compilation errors.
+type UnsafeOpenRcaServiceServer interface {
+ mustEmbedUnimplementedOpenRcaServiceServer()
+}
+
+func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) {
+ s.RegisterService(&OpenRcaService_ServiceDesc, srv)
+}
+
+func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OrcaLoadReportRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream})
+}
+
+type OpenRcaService_StreamCoreMetricsServer interface {
+ Send(*v3.OrcaLoadReport) error
+ grpc.ServerStream
+}
+
+type openRcaServiceStreamCoreMetricsServer struct {
+ grpc.ServerStream
+}
+
+func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var OpenRcaService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "xds.service.orca.v3.OpenRcaService",
+ HandlerType: (*OpenRcaServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamCoreMetrics",
+ Handler: _OpenRcaService_StreamCoreMetrics_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "xds/service/orca/v3/orca.proto",
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
new file mode 100644
index 000000000..1bd4aaf60
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
@@ -0,0 +1,168 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/cel.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CelMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ExprMatch *v3.CelExpression `protobuf:"bytes,1,opt,name=expr_match,json=exprMatch,proto3" json:"expr_match,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *CelMatcher) Reset() {
+ *x = CelMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelMatcher) ProtoMessage() {}
+
+func (x *CelMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelMatcher.ProtoReflect.Descriptor instead.
+func (*CelMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_cel_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CelMatcher) GetExprMatch() *v3.CelExpression {
+ if x != nil {
+ return x.ExprMatch
+ }
+ return nil
+}
+
+func (x *CelMatcher) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+var File_xds_type_matcher_v3_cel_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78,
+ 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_cel_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_cel_proto_rawDescData = file_xds_type_matcher_v3_cel_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_cel_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_cel_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_cel_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_cel_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_matcher_v3_cel_proto_goTypes = []interface{}{
+ (*CelMatcher)(nil), // 0: xds.type.matcher.v3.CelMatcher
+ (*v3.CelExpression)(nil), // 1: xds.type.v3.CelExpression
+}
+var file_xds_type_matcher_v3_cel_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.CelMatcher.expr_match:type_name -> xds.type.v3.CelExpression
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_cel_proto_init() }
+func file_xds_type_matcher_v3_cel_proto_init() {
+ if File_xds_type_matcher_v3_cel_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_cel_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_cel_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_cel_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_cel_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_cel_proto = out.File
+ file_xds_type_matcher_v3_cel_proto_rawDesc = nil
+ file_xds_type_matcher_v3_cel_proto_goTypes = nil
+ file_xds_type_matcher_v3_cel_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go
new file mode 100644
index 000000000..091267b0c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go
@@ -0,0 +1,177 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/cel.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CelMatcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CelMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CelMatcherMultiError, or
+// nil if none found.
+func (m *CelMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetExprMatch() == nil {
+ err := CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExprMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExprMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Description
+
+ if len(errors) > 0 {
+ return CelMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelMatcherMultiError is an error wrapping multiple validation errors
+// returned by CelMatcher.ValidateAll() if the designated constraints aren't met.
+type CelMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelMatcherMultiError) AllErrors() []error { return m }
+
+// CelMatcherValidationError is the validation error returned by
+// CelMatcher.Validate if the designated constraints aren't met.
+type CelMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelMatcherValidationError) ErrorName() string { return "CelMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go
new file mode 100644
index 000000000..3053b35f9
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/domain.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ServerNameMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DomainMatchers []*ServerNameMatcher_DomainMatcher `protobuf:"bytes,1,rep,name=domain_matchers,json=domainMatchers,proto3" json:"domain_matchers,omitempty"`
+}
+
+func (x *ServerNameMatcher) Reset() {
+ *x = ServerNameMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerNameMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerNameMatcher) ProtoMessage() {}
+
+func (x *ServerNameMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerNameMatcher.ProtoReflect.Descriptor instead.
+func (*ServerNameMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerNameMatcher) GetDomainMatchers() []*ServerNameMatcher_DomainMatcher {
+ if x != nil {
+ return x.DomainMatchers
+ }
+ return nil
+}
+
+type ServerNameMatcher_DomainMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *ServerNameMatcher_DomainMatcher) Reset() {
+ *x = ServerNameMatcher_DomainMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerNameMatcher_DomainMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerNameMatcher_DomainMatcher) ProtoMessage() {}
+
+func (x *ServerNameMatcher_DomainMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerNameMatcher_DomainMatcher.ProtoReflect.Descriptor instead.
+func (*ServerNameMatcher_DomainMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *ServerNameMatcher_DomainMatcher) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *ServerNameMatcher_DomainMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_domain_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_domain_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e,
+ 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0f, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x6d, 0x61,
+ 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x6f, 0x6d, 0x61, 0x69,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x74, 0x0a, 0x0d, 0x44, 0x6f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x07, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3f,
+ 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42,
+ 0x6e, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_domain_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_domain_proto_rawDescData = file_xds_type_matcher_v3_domain_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_domain_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_domain_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_domain_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_domain_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_domain_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_domain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_domain_proto_goTypes = []interface{}{
+ (*ServerNameMatcher)(nil), // 0: xds.type.matcher.v3.ServerNameMatcher
+ (*ServerNameMatcher_DomainMatcher)(nil), // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher
+ (*Matcher_OnMatch)(nil), // 2: xds.type.matcher.v3.Matcher.OnMatch
+}
+var file_xds_type_matcher_v3_domain_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.ServerNameMatcher.domain_matchers:type_name -> xds.type.matcher.v3.ServerNameMatcher.DomainMatcher
+ 2, // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_domain_proto_init() }
+func file_xds_type_matcher_v3_domain_proto_init() {
+ if File_xds_type_matcher_v3_domain_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_domain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerNameMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_domain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerNameMatcher_DomainMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_domain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_domain_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_domain_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_domain_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_domain_proto = out.File
+ file_xds_type_matcher_v3_domain_proto_rawDesc = nil
+ file_xds_type_matcher_v3_domain_proto_goTypes = nil
+ file_xds_type_matcher_v3_domain_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go
new file mode 100644
index 000000000..e95bdfa28
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go
@@ -0,0 +1,315 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/domain.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ServerNameMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ServerNameMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerNameMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ServerNameMatcherMultiError, or nil if none found.
+func (m *ServerNameMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerNameMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetDomainMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ServerNameMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerNameMatcherMultiError is an error wrapping multiple validation errors
+// returned by ServerNameMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type ServerNameMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerNameMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerNameMatcherMultiError) AllErrors() []error { return m }
+
+// ServerNameMatcherValidationError is the validation error returned by
+// ServerNameMatcher.Validate if the designated constraints aren't met.
+type ServerNameMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerNameMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerNameMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerNameMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerNameMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerNameMatcherValidationError) ErrorName() string {
+ return "ServerNameMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ServerNameMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerNameMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerNameMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerNameMatcherValidationError{}
+
+// Validate checks the field values on ServerNameMatcher_DomainMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ServerNameMatcher_DomainMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerNameMatcher_DomainMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ServerNameMatcher_DomainMatcherMultiError, or nil if none found.
+func (m *ServerNameMatcher_DomainMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerNameMatcher_DomainMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetDomains()) < 1 {
+ err := ServerNameMatcher_DomainMatcherValidationError{
+ field: "Domains",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ServerNameMatcher_DomainMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerNameMatcher_DomainMatcherMultiError is an error wrapping multiple
+// validation errors returned by ServerNameMatcher_DomainMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type ServerNameMatcher_DomainMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerNameMatcher_DomainMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerNameMatcher_DomainMatcherMultiError) AllErrors() []error { return m }
+
+// ServerNameMatcher_DomainMatcherValidationError is the validation error
+// returned by ServerNameMatcher_DomainMatcher.Validate if the designated
+// constraints aren't met.
+type ServerNameMatcher_DomainMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerNameMatcher_DomainMatcherValidationError) ErrorName() string {
+ return "ServerNameMatcher_DomainMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ServerNameMatcher_DomainMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerNameMatcher_DomainMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerNameMatcher_DomainMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerNameMatcher_DomainMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
new file mode 100644
index 000000000..eedcacec6
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/http_inputs.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HttpAttributesCelMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HttpAttributesCelMatchInput) Reset() {
+ *x = HttpAttributesCelMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpAttributesCelMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpAttributesCelMatchInput) ProtoMessage() {}
+
+func (x *HttpAttributesCelMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpAttributesCelMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpAttributesCelMatchInput) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0}
+}
+
+var File_xds_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65,
+ 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48,
+ 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescData = file_xds_type_matcher_v3_http_inputs_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_http_inputs_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_http_inputs_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{
+ (*HttpAttributesCelMatchInput)(nil), // 0: xds.type.matcher.v3.HttpAttributesCelMatchInput
+}
+var file_xds_type_matcher_v3_http_inputs_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_http_inputs_proto_init() }
+func file_xds_type_matcher_v3_http_inputs_proto_init() {
+ if File_xds_type_matcher_v3_http_inputs_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpAttributesCelMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_http_inputs_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_http_inputs_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_http_inputs_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_http_inputs_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_http_inputs_proto = out.File
+ file_xds_type_matcher_v3_http_inputs_proto_rawDesc = nil
+ file_xds_type_matcher_v3_http_inputs_proto_goTypes = nil
+ file_xds_type_matcher_v3_http_inputs_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go
new file mode 100644
index 000000000..5d8742927
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go
@@ -0,0 +1,139 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/http_inputs.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on HttpAttributesCelMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HttpAttributesCelMatchInput) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpAttributesCelMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HttpAttributesCelMatchInputMultiError, or nil if none found.
+func (m *HttpAttributesCelMatchInput) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpAttributesCelMatchInput) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return HttpAttributesCelMatchInputMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpAttributesCelMatchInputMultiError is an error wrapping multiple
+// validation errors returned by HttpAttributesCelMatchInput.ValidateAll() if
+// the designated constraints aren't met.
+type HttpAttributesCelMatchInputMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpAttributesCelMatchInputMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpAttributesCelMatchInputMultiError) AllErrors() []error { return m }
+
+// HttpAttributesCelMatchInputValidationError is the validation error returned
+// by HttpAttributesCelMatchInput.Validate if the designated constraints
+// aren't met.
+type HttpAttributesCelMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpAttributesCelMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpAttributesCelMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpAttributesCelMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpAttributesCelMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpAttributesCelMatchInputValidationError) ErrorName() string {
+ return "HttpAttributesCelMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpAttributesCelMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpAttributesCelMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpAttributesCelMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpAttributesCelMatchInputValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go
new file mode 100644
index 000000000..6facd7aeb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go
@@ -0,0 +1,256 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/ip.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type IPMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*IPMatcher_IPRangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *IPMatcher) Reset() {
+ *x = IPMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IPMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IPMatcher) ProtoMessage() {}
+
+func (x *IPMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IPMatcher.ProtoReflect.Descriptor instead.
+func (*IPMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *IPMatcher) GetRangeMatchers() []*IPMatcher_IPRangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type IPMatcher_IPRangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.CidrRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+ Exclusive bool `protobuf:"varint,3,opt,name=exclusive,proto3" json:"exclusive,omitempty"`
+}
+
+func (x *IPMatcher_IPRangeMatcher) Reset() {
+ *x = IPMatcher_IPRangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IPMatcher_IPRangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IPMatcher_IPRangeMatcher) ProtoMessage() {}
+
+func (x *IPMatcher_IPRangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IPMatcher_IPRangeMatcher.ProtoReflect.Descriptor instead.
+func (*IPMatcher_IPRangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetRanges() []*v3.CidrRange {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetExclusive() bool {
+ if x != nil {
+ return x.Exclusive
+ }
+ return false
+}
+
+var File_xds_type_matcher_v3_ip_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_ip_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x69, 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64,
+ 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76,
+ 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x09, 0x49, 0x50, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x49,
+ 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0x0a,
+ 0x0e, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12,
+ 0x38, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69,
+ 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65,
+ 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x42, 0x66, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02,
+ 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x42, 0x0e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_ip_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_ip_proto_rawDescData = file_xds_type_matcher_v3_ip_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_ip_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_ip_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_ip_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_ip_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_ip_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_ip_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_ip_proto_goTypes = []interface{}{
+ (*IPMatcher)(nil), // 0: xds.type.matcher.v3.IPMatcher
+ (*IPMatcher_IPRangeMatcher)(nil), // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher
+ (*v3.CidrRange)(nil), // 2: xds.core.v3.CidrRange
+ (*Matcher_OnMatch)(nil), // 3: xds.type.matcher.v3.Matcher.OnMatch
+}
+var file_xds_type_matcher_v3_ip_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.IPMatcher.range_matchers:type_name -> xds.type.matcher.v3.IPMatcher.IPRangeMatcher
+ 2, // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.ranges:type_name -> xds.core.v3.CidrRange
+ 3, // 2: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_ip_proto_init() }
+func file_xds_type_matcher_v3_ip_proto_init() {
+ if File_xds_type_matcher_v3_ip_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_ip_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IPMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_ip_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IPMatcher_IPRangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_ip_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_ip_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_ip_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_ip_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_ip_proto = out.File
+ file_xds_type_matcher_v3_ip_proto_rawDesc = nil
+ file_xds_type_matcher_v3_ip_proto_goTypes = nil
+ file_xds_type_matcher_v3_ip_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go
new file mode 100644
index 000000000..c1fca03bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/ip.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on IPMatcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *IPMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on IPMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in IPMatcherMultiError, or nil
+// if none found.
+func (m *IPMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *IPMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return IPMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// IPMatcherMultiError is an error wrapping multiple validation errors returned
+// by IPMatcher.ValidateAll() if the designated constraints aren't met.
+type IPMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m IPMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m IPMatcherMultiError) AllErrors() []error { return m }
+
+// IPMatcherValidationError is the validation error returned by
+// IPMatcher.Validate if the designated constraints aren't met.
+type IPMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e IPMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e IPMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e IPMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e IPMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e IPMatcherValidationError) ErrorName() string { return "IPMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e IPMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sIPMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = IPMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = IPMatcherValidationError{}
+
+// Validate checks the field values on IPMatcher_IPRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *IPMatcher_IPRangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on IPMatcher_IPRangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// IPMatcher_IPRangeMatcherMultiError, or nil if none found.
+func (m *IPMatcher_IPRangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *IPMatcher_IPRangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := IPMatcher_IPRangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Exclusive
+
+ if len(errors) > 0 {
+ return IPMatcher_IPRangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// IPMatcher_IPRangeMatcherMultiError is an error wrapping multiple validation
+// errors returned by IPMatcher_IPRangeMatcher.ValidateAll() if the designated
+// constraints aren't met.
+type IPMatcher_IPRangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m IPMatcher_IPRangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m IPMatcher_IPRangeMatcherMultiError) AllErrors() []error { return m }
+
+// IPMatcher_IPRangeMatcherValidationError is the validation error returned by
+// IPMatcher_IPRangeMatcher.Validate if the designated constraints aren't met.
+type IPMatcher_IPRangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e IPMatcher_IPRangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e IPMatcher_IPRangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e IPMatcher_IPRangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e IPMatcher_IPRangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e IPMatcher_IPRangeMatcherValidationError) ErrorName() string {
+ return "IPMatcher_IPRangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e IPMatcher_IPRangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sIPMatcher_IPRangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = IPMatcher_IPRangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = IPMatcher_IPRangeMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
new file mode 100644
index 000000000..ac8dd4f19
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
@@ -0,0 +1,1056 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/matcher.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Matcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatcherType:
+ //
+ // *Matcher_MatcherList_
+ // *Matcher_MatcherTree_
+ MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"`
+ OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"`
+}
+
+func (x *Matcher) Reset() {
+ *x = Matcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher) ProtoMessage() {}
+
+func (x *Matcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher.ProtoReflect.Descriptor instead.
+func (*Matcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Matcher) GetMatcherType() isMatcher_MatcherType {
+ if m != nil {
+ return m.MatcherType
+ }
+ return nil
+}
+
+func (x *Matcher) GetMatcherList() *Matcher_MatcherList {
+ if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok {
+ return x.MatcherList
+ }
+ return nil
+}
+
+func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree {
+ if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok {
+ return x.MatcherTree
+ }
+ return nil
+}
+
+func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnNoMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherType interface {
+ isMatcher_MatcherType()
+}
+
+type Matcher_MatcherList_ struct {
+ MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_ struct {
+ MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_) isMatcher_MatcherType() {}
+
+func (*Matcher_MatcherTree_) isMatcher_MatcherType() {}
+
+type Matcher_OnMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to OnMatch:
+ //
+ // *Matcher_OnMatch_Matcher
+ // *Matcher_OnMatch_Action
+ OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"`
+}
+
+func (x *Matcher_OnMatch) Reset() {
+ *x = Matcher_OnMatch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_OnMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_OnMatch) ProtoMessage() {}
+
+func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead.
+func (*Matcher_OnMatch) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch {
+ if m != nil {
+ return m.OnMatch
+ }
+ return nil
+}
+
+func (x *Matcher_OnMatch) GetMatcher() *Matcher {
+ if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok {
+ return x.Matcher
+ }
+ return nil
+}
+
+func (x *Matcher_OnMatch) GetAction() *v3.TypedExtensionConfig {
+ if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok {
+ return x.Action
+ }
+ return nil
+}
+
+type isMatcher_OnMatch_OnMatch interface {
+ isMatcher_OnMatch_OnMatch()
+}
+
+type Matcher_OnMatch_Matcher struct {
+ Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"`
+}
+
+type Matcher_OnMatch_Action struct {
+ Action *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"`
+}
+
+func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {}
+
+func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {}
+
+type Matcher_MatcherList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"`
+}
+
+func (x *Matcher_MatcherList) Reset() {
+ *x = Matcher_MatcherList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList) ProtoMessage() {}
+
+func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher {
+ if x != nil {
+ return x.Matchers
+ }
+ return nil
+}
+
+type Matcher_MatcherTree struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
+ // Types that are assignable to TreeType:
+ //
+ // *Matcher_MatcherTree_ExactMatchMap
+ // *Matcher_MatcherTree_PrefixMatchMap
+ // *Matcher_MatcherTree_CustomMatch
+ TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"`
+}
+
+func (x *Matcher_MatcherTree) Reset() {
+ *x = Matcher_MatcherTree{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherTree) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherTree) ProtoMessage() {}
+
+func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Matcher_MatcherTree) GetInput() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Input
+ }
+ return nil
+}
+
+func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType {
+ if m != nil {
+ return m.TreeType
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok {
+ return x.ExactMatchMap
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok {
+ return x.PrefixMatchMap
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetCustomMatch() *v3.TypedExtensionConfig {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok {
+ return x.CustomMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherTree_TreeType interface {
+ isMatcher_MatcherTree_TreeType()
+}
+
+type Matcher_MatcherTree_ExactMatchMap struct {
+ ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_PrefixMatchMap struct {
+ PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_CustomMatch struct {
+ CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"`
+}
+
+func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {}
+
+func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {}
+
+func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {}
+
+type Matcher_MatcherList_Predicate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchType:
+ //
+ // *Matcher_MatcherList_Predicate_SinglePredicate_
+ // *Matcher_MatcherList_Predicate_OrMatcher
+ // *Matcher_MatcherList_Predicate_AndMatcher
+ // *Matcher_MatcherList_Predicate_NotMatcher
+ MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"`
+}
+
+func (x *Matcher_MatcherList_Predicate) Reset() {
+ *x = Matcher_MatcherList_Predicate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType {
+ if m != nil {
+ return m.MatchType
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok {
+ return x.SinglePredicate
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok {
+ return x.OrMatcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok {
+ return x.AndMatcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok {
+ return x.NotMatcher
+ }
+ return nil
+}
+
+type isMatcher_MatcherList_Predicate_MatchType interface {
+ isMatcher_MatcherList_Predicate_MatchType()
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_ struct {
+ SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_OrMatcher struct {
+ OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_AndMatcher struct {
+ AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_NotMatcher struct {
+ NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+type Matcher_MatcherList_FieldMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) Reset() {
+ *x = Matcher_MatcherList_FieldMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate {
+ if x != nil {
+ return x.Predicate
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
+ // Types that are assignable to Matcher:
+ //
+ // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch
+ // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch
+ Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"`
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() {
+ *x = Matcher_MatcherList_Predicate_SinglePredicate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0}
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Input
+ }
+ return nil
+}
+
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher {
+ if m != nil {
+ return m.Matcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *StringMatcher {
+ if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok {
+ return x.ValueMatch
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v3.TypedExtensionConfig {
+ if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok {
+ return x.CustomMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface {
+ isMatcher_MatcherList_Predicate_SinglePredicate_Matcher()
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct {
+ ValueMatch *StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct {
+ CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() {
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() {
+}
+
+type Matcher_MatcherList_Predicate_PredicateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"`
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() {
+ *x = Matcher_MatcherList_Predicate_PredicateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1}
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate {
+ if x != nil {
+ return x.Predicate
+ }
+ return nil
+}
+
+type Matcher_MatcherTree_MatchMap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Matcher_MatcherTree_MatchMap) Reset() {
+ *x = Matcher_MatcherTree_MatchMap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherTree_MatchMap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {}
+
+func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch {
+ if x != nil {
+ return x.Map
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_matcher_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12,
+ 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12,
+ 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69,
+ 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52,
+ 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65,
+ 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69,
+ 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65,
+ 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e,
+ 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a,
+ 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a,
+ 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a,
+ 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04,
+ 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a,
+ 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f,
+ 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54,
+ 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d,
+ 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, 0x0a,
+ 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61,
+ 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65,
+ 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72,
+ 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c,
+ 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a,
+ 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_matcher_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_matcher_proto_rawDescData = file_xds_type_matcher_v3_matcher_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_matcher_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_matcher_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_matcher_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_matcher_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_xds_type_matcher_v3_matcher_proto_goTypes = []interface{}{
+ (*Matcher)(nil), // 0: xds.type.matcher.v3.Matcher
+ (*Matcher_OnMatch)(nil), // 1: xds.type.matcher.v3.Matcher.OnMatch
+ (*Matcher_MatcherList)(nil), // 2: xds.type.matcher.v3.Matcher.MatcherList
+ (*Matcher_MatcherTree)(nil), // 3: xds.type.matcher.v3.Matcher.MatcherTree
+ (*Matcher_MatcherList_Predicate)(nil), // 4: xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ (*Matcher_MatcherList_FieldMatcher)(nil), // 5: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher
+ (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 6: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate
+ (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 7: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ (*Matcher_MatcherTree_MatchMap)(nil), // 8: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ nil, // 9: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry
+ (*v3.TypedExtensionConfig)(nil), // 10: xds.core.v3.TypedExtensionConfig
+ (*StringMatcher)(nil), // 11: xds.type.matcher.v3.StringMatcher
+}
+var file_xds_type_matcher_v3_matcher_proto_depIdxs = []int32{
+ 2, // 0: xds.type.matcher.v3.Matcher.matcher_list:type_name -> xds.type.matcher.v3.Matcher.MatcherList
+ 3, // 1: xds.type.matcher.v3.Matcher.matcher_tree:type_name -> xds.type.matcher.v3.Matcher.MatcherTree
+ 1, // 2: xds.type.matcher.v3.Matcher.on_no_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 0, // 3: xds.type.matcher.v3.Matcher.OnMatch.matcher:type_name -> xds.type.matcher.v3.Matcher
+ 10, // 4: xds.type.matcher.v3.Matcher.OnMatch.action:type_name -> xds.core.v3.TypedExtensionConfig
+ 5, // 5: xds.type.matcher.v3.Matcher.MatcherList.matchers:type_name -> xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher
+ 10, // 6: xds.type.matcher.v3.Matcher.MatcherTree.input:type_name -> xds.core.v3.TypedExtensionConfig
+ 8, // 7: xds.type.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ 8, // 8: xds.type.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ 10, // 9: xds.type.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> xds.core.v3.TypedExtensionConfig
+ 6, // 10: xds.type.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate
+ 7, // 11: xds.type.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ 7, // 12: xds.type.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ 4, // 13: xds.type.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 4, // 14: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 1, // 15: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 10, // 16: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> xds.core.v3.TypedExtensionConfig
+ 11, // 17: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> xds.type.matcher.v3.StringMatcher
+ 10, // 18: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> xds.core.v3.TypedExtensionConfig
+ 4, // 19: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 9, // 20: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry
+ 1, // 21: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 22, // [22:22] is the sub-list for method output_type
+ 22, // [22:22] is the sub-list for method input_type
+ 22, // [22:22] is the sub-list for extension type_name
+ 22, // [22:22] is the sub-list for extension extendee
+ 0, // [0:22] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_matcher_proto_init() }
+func file_xds_type_matcher_v3_matcher_proto_init() {
+ if File_xds_type_matcher_v3_matcher_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_string_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_OnMatch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherTree); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_FieldMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherTree_MatchMap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_)(nil),
+ (*Matcher_MatcherTree_)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Matcher_OnMatch_Matcher)(nil),
+ (*Matcher_OnMatch_Action)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*Matcher_MatcherTree_ExactMatchMap)(nil),
+ (*Matcher_MatcherTree_PrefixMatchMap)(nil),
+ (*Matcher_MatcherTree_CustomMatch)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil),
+ (*Matcher_MatcherList_Predicate_OrMatcher)(nil),
+ (*Matcher_MatcherList_Predicate_AndMatcher)(nil),
+ (*Matcher_MatcherList_Predicate_NotMatcher)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil),
+ (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_matcher_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 10,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_matcher_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_matcher_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_matcher_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_matcher_proto = out.File
+ file_xds_type_matcher_v3_matcher_proto_rawDesc = nil
+ file_xds_type_matcher_v3_matcher_proto_goTypes = nil
+ file_xds_type_matcher_v3_matcher_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go
new file mode 100644
index 000000000..60b721f5f
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go
@@ -0,0 +1,1913 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/matcher.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Matcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in MatcherMultiError, or nil if none found.
+func (m *Matcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetOnNoMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.MatcherType.(type) {
+ case *Matcher_MatcherList_:
+ if v == nil {
+ err := MatcherValidationError{
+ field: "MatcherType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatcherList()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_:
+ if v == nil {
+ err := MatcherValidationError{
+ field: "MatcherType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatcherTree()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return MatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// MatcherMultiError is an error wrapping multiple validation errors returned
+// by Matcher.ValidateAll() if the designated constraints aren't met.
+type MatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MatcherMultiError) AllErrors() []error { return m }
+
+// MatcherValidationError is the validation error returned by Matcher.Validate
+// if the designated constraints aren't met.
+type MatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MatcherValidationError{}
+
+// Validate checks the field values on Matcher_OnMatch with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_OnMatch) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_OnMatch with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_OnMatchMultiError, or nil if none found.
+func (m *Matcher_OnMatch) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_OnMatch) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofOnMatchPresent := false
+ switch v := m.OnMatch.(type) {
+ case *Matcher_OnMatch_Matcher:
+ if v == nil {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofOnMatchPresent = true
+
+ if all {
+ switch v := interface{}(m.GetMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_OnMatch_Action:
+ if v == nil {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofOnMatchPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAction()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofOnMatchPresent {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_OnMatchMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_OnMatchMultiError is an error wrapping multiple validation errors
+// returned by Matcher_OnMatch.ValidateAll() if the designated constraints
+// aren't met.
+type Matcher_OnMatchMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_OnMatchMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_OnMatchMultiError) AllErrors() []error { return m }
+
+// Matcher_OnMatchValidationError is the validation error returned by
+// Matcher_OnMatch.Validate if the designated constraints aren't met.
+type Matcher_OnMatchValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_OnMatchValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_OnMatchValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_OnMatchValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_OnMatchValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Matcher_OnMatchValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_OnMatch.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_OnMatchValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_OnMatchValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherListMultiError, or nil if none found.
+func (m *Matcher_MatcherList) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetMatchers()) < 1 {
+ err := Matcher_MatcherListValidationError{
+ field: "Matchers",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherListMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherListMultiError is an error wrapping multiple validation
+// errors returned by Matcher_MatcherList.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherListMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherListMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherListMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherListValidationError is the validation error returned by
+// Matcher_MatcherList.Validate if the designated constraints aren't met.
+type Matcher_MatcherListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherListValidationError) ErrorName() string {
+ return "Matcher_MatcherListValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherListValidationError{}
+
+// Validate checks the field values on Matcher_MatcherTree with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherTree) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherTree with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherTreeMultiError, or nil if none found.
+func (m *Matcher_MatcherTree) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherTree) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetInput() == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetInput()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ oneofTreeTypePresent := false
+ switch v := m.TreeType.(type) {
+ case *Matcher_MatcherTree_ExactMatchMap:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetExactMatchMap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_PrefixMatchMap:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetPrefixMatchMap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_CustomMatch:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustomMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofTreeTypePresent {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherTreeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherTreeMultiError is an error wrapping multiple validation
+// errors returned by Matcher_MatcherTree.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherTreeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherTreeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherTreeValidationError is the validation error returned by
+// Matcher_MatcherTree.Validate if the designated constraints aren't met.
+type Matcher_MatcherTreeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherTreeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherTreeValidationError) ErrorName() string {
+ return "Matcher_MatcherTreeValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherTreeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherTree.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherTreeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherTreeValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList_Predicate with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList_Predicate with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherList_PredicateMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofMatchTypePresent := false
+ switch v := m.MatchType.(type) {
+ case *Matcher_MatcherList_Predicate_SinglePredicate_:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetSinglePredicate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_OrMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetOrMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_AndMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetAndMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_NotMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetNotMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatchTypePresent {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_PredicateMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple
+// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll()
+// if the designated constraints aren't met.
+type Matcher_MatcherList_PredicateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_PredicateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_PredicateValidationError is the validation error
+// returned by Matcher_MatcherList_Predicate.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_PredicateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string {
+ return "Matcher_MatcherList_PredicateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_PredicateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_PredicateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_PredicateValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList_FieldMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *Matcher_MatcherList_FieldMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found.
+func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetPredicate() == nil {
+ err := Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetPredicate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if m.GetOnMatch() == nil {
+ err := Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_FieldMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple
+// validation errors returned by
+// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherList_FieldMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_FieldMatcherValidationError is the validation error
+// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_FieldMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string {
+ return "Matcher_MatcherList_FieldMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_FieldMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_FieldMatcherValidationError{}
+
+// Validate checks the field values on
+// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetInput() == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetInput()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ oneofMatcherPresent := false
+ switch v := m.Matcher.(type) {
+ case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch:
+ if v == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatcherPresent = true
+
+ if all {
+ switch v := interface{}(m.GetValueMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch:
+ if v == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatcherPresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustomMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatcherPresent {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping
+// multiple validation errors returned by
+// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the
+// designated constraints aren't met.
+type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the
+// validation error returned by
+// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string {
+ return "Matcher_MatcherList_Predicate_SinglePredicateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{}
+
+// Validate checks the field values on
+// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetPredicate()) < 2 {
+ err := Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: "Predicate",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetPredicate() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_Predicate_PredicateListMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping
+// multiple validation errors returned by
+// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherList_Predicate_PredicateListMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation
+// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if
+// the designated constraints aren't met.
+type Matcher_MatcherList_Predicate_PredicateListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string {
+ return "Matcher_MatcherList_Predicate_PredicateListValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_Predicate_PredicateListValidationError{}
+
+// Validate checks the field values on Matcher_MatcherTree_MatchMap with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherTree_MatchMap) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherTree_MatchMapMultiError, or nil if none found.
+func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetMap()) < 1 {
+ err := Matcher_MatcherTree_MatchMapValidationError{
+ field: "Map",
+ reason: "value must contain at least 1 pair(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ {
+ sorted_keys := make([]string, len(m.GetMap()))
+ i := 0
+ for key := range m.GetMap() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetMap()[key]
+ _ = val
+
+ // no validation rules for Map[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherTree_MatchMapMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple
+// validation errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if
+// the designated constraints aren't met.
+type Matcher_MatcherTree_MatchMapMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherTree_MatchMapMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherTree_MatchMapValidationError is the validation error returned
+// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints
+// aren't met.
+type Matcher_MatcherTree_MatchMapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string {
+ return "Matcher_MatcherTree_MatchMapValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherTree_MatchMapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherTree_MatchMapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherTree_MatchMapValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
new file mode 100644
index 000000000..bc811ecb2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
@@ -0,0 +1,539 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/range.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Int64RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*Int64RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *Int64RangeMatcher) Reset() {
+ *x = Int64RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64RangeMatcher) ProtoMessage() {}
+
+func (x *Int64RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int64RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Int64RangeMatcher) GetRangeMatchers() []*Int64RangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type Int32RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*Int32RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *Int32RangeMatcher) Reset() {
+ *x = Int32RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32RangeMatcher) ProtoMessage() {}
+
+func (x *Int32RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int32RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Int32RangeMatcher) GetRangeMatchers() []*Int32RangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type DoubleRangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*DoubleRangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *DoubleRangeMatcher) Reset() {
+ *x = DoubleRangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRangeMatcher) ProtoMessage() {}
+
+func (x *DoubleRangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRangeMatcher.ProtoReflect.Descriptor instead.
+func (*DoubleRangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRangeMatcher) GetRangeMatchers() []*DoubleRangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type Int64RangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.Int64Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) Reset() {
+ *x = Int64RangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64RangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *Int64RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int64RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) GetRanges() []*v3.Int64Range {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type Int32RangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.Int32Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) Reset() {
+ *x = Int32RangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32RangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *Int32RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int32RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) GetRanges() []*v3.Int32Range {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type DoubleRangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.DoubleRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) Reset() {
+ *x = DoubleRangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *DoubleRangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*DoubleRangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) GetRanges() []*v3.DoubleRange {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_range_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_range_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x01, 0x0a, 0x11,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01,
+ 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39,
+ 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74,
+ 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xfc, 0x01, 0x0a, 0x11, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, 0x0a,
+ 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, 0x0a,
+ 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01,
+ 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xff, 0x01, 0x0a, 0x12, 0x44, 0x6f,
+ 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x5b, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8b, 0x01,
+ 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3a,
+ 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02,
+ 0x08, 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x5a, 0x0a, 0x1e, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_range_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_range_proto_rawDescData = file_xds_type_matcher_v3_range_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_range_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_range_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_range_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_range_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_xds_type_matcher_v3_range_proto_goTypes = []interface{}{
+ (*Int64RangeMatcher)(nil), // 0: xds.type.matcher.v3.Int64RangeMatcher
+ (*Int32RangeMatcher)(nil), // 1: xds.type.matcher.v3.Int32RangeMatcher
+ (*DoubleRangeMatcher)(nil), // 2: xds.type.matcher.v3.DoubleRangeMatcher
+ (*Int64RangeMatcher_RangeMatcher)(nil), // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher
+ (*Int32RangeMatcher_RangeMatcher)(nil), // 4: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher
+ (*DoubleRangeMatcher_RangeMatcher)(nil), // 5: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher
+ (*v3.Int64Range)(nil), // 6: xds.type.v3.Int64Range
+ (*Matcher_OnMatch)(nil), // 7: xds.type.matcher.v3.Matcher.OnMatch
+ (*v3.Int32Range)(nil), // 8: xds.type.v3.Int32Range
+ (*v3.DoubleRange)(nil), // 9: xds.type.v3.DoubleRange
+}
+var file_xds_type_matcher_v3_range_proto_depIdxs = []int32{
+ 3, // 0: xds.type.matcher.v3.Int64RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher
+ 4, // 1: xds.type.matcher.v3.Int32RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher
+ 5, // 2: xds.type.matcher.v3.DoubleRangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher
+ 6, // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int64Range
+ 7, // 4: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 8, // 5: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int32Range
+ 7, // 6: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 9, // 7: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.DoubleRange
+ 7, // 8: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_range_proto_init() }
+func file_xds_type_matcher_v3_range_proto_init() {
+ if File_xds_type_matcher_v3_range_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64RangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32RangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_range_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_range_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_range_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_range_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_range_proto = out.File
+ file_xds_type_matcher_v3_range_proto_rawDesc = nil
+ file_xds_type_matcher_v3_range_proto_goTypes = nil
+ file_xds_type_matcher_v3_range_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go
new file mode 100644
index 000000000..8cb598643
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go
@@ -0,0 +1,975 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/range.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Int64RangeMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Int64RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64RangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Int64RangeMatcherMultiError, or nil if none found.
+func (m *Int64RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Int64RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by Int64RangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type Int64RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int64RangeMatcherValidationError is the validation error returned by
+// Int64RangeMatcher.Validate if the designated constraints aren't met.
+type Int64RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeMatcherValidationError) ErrorName() string {
+ return "Int64RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int64RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeMatcherValidationError{}
+
+// Validate checks the field values on Int32RangeMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Int32RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32RangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Int32RangeMatcherMultiError, or nil if none found.
+func (m *Int32RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Int32RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by Int32RangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type Int32RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int32RangeMatcherValidationError is the validation error returned by
+// Int32RangeMatcher.Validate if the designated constraints aren't met.
+type Int32RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeMatcherValidationError) ErrorName() string {
+ return "Int32RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int32RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeMatcherValidationError{}
+
+// Validate checks the field values on DoubleRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DoubleRangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// DoubleRangeMatcherMultiError, or nil if none found.
+func (m *DoubleRangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return DoubleRangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by DoubleRangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type DoubleRangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMatcherMultiError) AllErrors() []error { return m }
+
+// DoubleRangeMatcherValidationError is the validation error returned by
+// DoubleRangeMatcher.Validate if the designated constraints aren't met.
+type DoubleRangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeMatcherValidationError) ErrorName() string {
+ return "DoubleRangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeMatcherValidationError{}
+
+// Validate checks the field values on Int64RangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Int64RangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64RangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Int64RangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *Int64RangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64RangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := Int64RangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Int64RangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by Int64RangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type Int64RangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int64RangeMatcher_RangeMatcherValidationError is the validation error
+// returned by Int64RangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type Int64RangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "Int64RangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int64RangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64RangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeMatcher_RangeMatcherValidationError{}
+
+// Validate checks the field values on Int32RangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Int32RangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32RangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Int32RangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *Int32RangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32RangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := Int32RangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Int32RangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by Int32RangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type Int32RangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int32RangeMatcher_RangeMatcherValidationError is the validation error
+// returned by Int32RangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type Int32RangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "Int32RangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int32RangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32RangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeMatcher_RangeMatcherValidationError{}
+
+// Validate checks the field values on DoubleRangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DoubleRangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// DoubleRangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *DoubleRangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return DoubleRangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by DoubleRangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type DoubleRangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// DoubleRangeMatcher_RangeMatcherValidationError is the validation error
+// returned by DoubleRangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type DoubleRangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "DoubleRangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeMatcher_RangeMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
new file mode 100644
index 000000000..c02ec2a91
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/regex.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type RegexMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to EngineType:
+ //
+ // *RegexMatcher_GoogleRe2
+ EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"`
+ Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"`
+}
+
+func (x *RegexMatcher) Reset() {
+ *x = RegexMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher) ProtoMessage() {}
+
+func (x *RegexMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead.
+func (*RegexMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType {
+ if m != nil {
+ return m.EngineType
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 {
+ if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok {
+ return x.GoogleRe2
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetRegex() string {
+ if x != nil {
+ return x.Regex
+ }
+ return ""
+}
+
+type isRegexMatcher_EngineType interface {
+ isRegexMatcher_EngineType()
+}
+
+type RegexMatcher_GoogleRe2 struct {
+ GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"`
+}
+
+func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {}
+
+type RegexMatcher_GoogleRE2 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RegexMatcher_GoogleRE2) Reset() {
+ *x = RegexMatcher_GoogleRE2{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher_GoogleRE2) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher_GoogleRE2) ProtoMessage() {}
+
+func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead.
+func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0}
+}
+
+var File_xds_type_matcher_v3_regex_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_regex_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xa6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x56, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45,
+ 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65,
+ 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x0b, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x52, 0x45, 0x32, 0x42, 0x12, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x5a, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65,
+ 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_regex_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_regex_proto_rawDescData = file_xds_type_matcher_v3_regex_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_regex_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_regex_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_regex_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_regex_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_regex_proto_goTypes = []interface{}{
+ (*RegexMatcher)(nil), // 0: xds.type.matcher.v3.RegexMatcher
+ (*RegexMatcher_GoogleRE2)(nil), // 1: xds.type.matcher.v3.RegexMatcher.GoogleRE2
+}
+var file_xds_type_matcher_v3_regex_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.RegexMatcher.google_re2:type_name -> xds.type.matcher.v3.RegexMatcher.GoogleRE2
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_regex_proto_init() }
+func file_xds_type_matcher_v3_regex_proto_init() {
+ if File_xds_type_matcher_v3_regex_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher_GoogleRE2); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*RegexMatcher_GoogleRe2)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_regex_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_regex_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_regex_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_regex_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_regex_proto = out.File
+ file_xds_type_matcher_v3_regex_proto_rawDesc = nil
+ file_xds_type_matcher_v3_regex_proto_goTypes = nil
+ file_xds_type_matcher_v3_regex_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go
new file mode 100644
index 000000000..8b7682964
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go
@@ -0,0 +1,317 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/regex.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on RegexMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RegexMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RegexMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RegexMatcherMultiError, or
+// nil if none found.
+func (m *RegexMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RegexMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetRegex()) < 1 {
+ err := RegexMatcherValidationError{
+ field: "Regex",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofEngineTypePresent := false
+ switch v := m.EngineType.(type) {
+ case *RegexMatcher_GoogleRe2:
+ if v == nil {
+ err := RegexMatcherValidationError{
+ field: "EngineType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofEngineTypePresent = true
+
+ if m.GetGoogleRe2() == nil {
+ err := RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetGoogleRe2()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofEngineTypePresent {
+ err := RegexMatcherValidationError{
+ field: "EngineType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return RegexMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// RegexMatcherMultiError is an error wrapping multiple validation errors
+// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met.
+type RegexMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RegexMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RegexMatcherMultiError) AllErrors() []error { return m }
+
+// RegexMatcherValidationError is the validation error returned by
+// RegexMatcher.Validate if the designated constraints aren't met.
+type RegexMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RegexMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcherValidationError{}
+
+// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RegexMatcher_GoogleRE2) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RegexMatcher_GoogleRE2MultiError, or nil if none found.
+func (m *RegexMatcher_GoogleRE2) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RegexMatcher_GoogleRE2) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return RegexMatcher_GoogleRE2MultiError(errors)
+ }
+
+ return nil
+}
+
+// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation
+// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated
+// constraints aren't met.
+type RegexMatcher_GoogleRE2MultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RegexMatcher_GoogleRE2MultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m }
+
+// RegexMatcher_GoogleRE2ValidationError is the validation error returned by
+// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met.
+type RegexMatcher_GoogleRE2ValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string {
+ return "RegexMatcher_GoogleRE2ValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RegexMatcher_GoogleRE2ValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher_GoogleRE2.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcher_GoogleRE2ValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcher_GoogleRE2ValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
new file mode 100644
index 000000000..79b70bcb7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
@@ -0,0 +1,353 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/string.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StringMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchPattern:
+ //
+ // *StringMatcher_Exact
+ // *StringMatcher_Prefix
+ // *StringMatcher_Suffix
+ // *StringMatcher_SafeRegex
+ // *StringMatcher_Contains
+ // *StringMatcher_Custom
+ MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+ IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"`
+}
+
+func (x *StringMatcher) Reset() {
+ *x = StringMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StringMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringMatcher) ProtoMessage() {}
+
+func (x *StringMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead.
+func (*StringMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetExact() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok {
+ return x.Exact
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetPrefix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok {
+ return x.Prefix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSuffix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok {
+ return x.Suffix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSafeRegex() *RegexMatcher {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok {
+ return x.SafeRegex
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetContains() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok {
+ return x.Contains
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetIgnoreCase() bool {
+ if x != nil {
+ return x.IgnoreCase
+ }
+ return false
+}
+
+type isStringMatcher_MatchPattern interface {
+ isStringMatcher_MatchPattern()
+}
+
+type StringMatcher_Exact struct {
+ Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"`
+}
+
+type StringMatcher_Prefix struct {
+ Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"`
+}
+
+type StringMatcher_Suffix struct {
+ Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"`
+}
+
+type StringMatcher_SafeRegex struct {
+ SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"`
+}
+
+type StringMatcher_Contains struct {
+ Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"`
+}
+
+type StringMatcher_Custom struct {
+ Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+}
+
+func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {}
+
+type ListStringMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"`
+}
+
+func (x *ListStringMatcher) Reset() {
+ *x = ListStringMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListStringMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListStringMatcher) ProtoMessage() {}
+
+func (x *ListStringMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead.
+func (*ListStringMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListStringMatcher) GetPatterns() []*StringMatcher {
+ if x != nil {
+ return x.Patterns
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_string_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_string_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6,
+ 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
+ 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73,
+ 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04,
+ 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4c,
+ 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48,
+ 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73,
+ 0x65, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65,
+ 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x5d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x08,
+ 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x42, 0x5b, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_string_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_string_proto_rawDescData = file_xds_type_matcher_v3_string_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_string_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_string_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_string_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_string_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_string_proto_goTypes = []interface{}{
+ (*StringMatcher)(nil), // 0: xds.type.matcher.v3.StringMatcher
+ (*ListStringMatcher)(nil), // 1: xds.type.matcher.v3.ListStringMatcher
+ (*RegexMatcher)(nil), // 2: xds.type.matcher.v3.RegexMatcher
+ (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig
+}
+var file_xds_type_matcher_v3_string_proto_depIdxs = []int32{
+ 2, // 0: xds.type.matcher.v3.StringMatcher.safe_regex:type_name -> xds.type.matcher.v3.RegexMatcher
+ 3, // 1: xds.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig
+ 0, // 2: xds.type.matcher.v3.ListStringMatcher.patterns:type_name -> xds.type.matcher.v3.StringMatcher
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_string_proto_init() }
+func file_xds_type_matcher_v3_string_proto_init() {
+ if File_xds_type_matcher_v3_string_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_regex_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StringMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListStringMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*StringMatcher_Exact)(nil),
+ (*StringMatcher_Prefix)(nil),
+ (*StringMatcher_Suffix)(nil),
+ (*StringMatcher_SafeRegex)(nil),
+ (*StringMatcher_Contains)(nil),
+ (*StringMatcher_Custom)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_string_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_string_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_string_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_string_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_string_proto = out.File
+ file_xds_type_matcher_v3_string_proto_rawDesc = nil
+ file_xds_type_matcher_v3_string_proto_goTypes = nil
+ file_xds_type_matcher_v3_string_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go
new file mode 100644
index 000000000..339d3b631
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go
@@ -0,0 +1,481 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/string.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on StringMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *StringMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StringMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in StringMatcherMultiError, or
+// nil if none found.
+func (m *StringMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StringMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for IgnoreCase
+
+ oneofMatchPatternPresent := false
+ switch v := m.MatchPattern.(type) {
+ case *StringMatcher_Exact:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+ // no validation rules for Exact
+ case *StringMatcher_Prefix:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetPrefix()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Prefix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_Suffix:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetSuffix()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Suffix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_SafeRegex:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if m.GetSafeRegex() == nil {
+ err := StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetSafeRegex()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *StringMatcher_Contains:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetContains()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Contains",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_Custom:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatchPatternPresent {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return StringMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// StringMatcherMultiError is an error wrapping multiple validation errors
+// returned by StringMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type StringMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StringMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StringMatcherMultiError) AllErrors() []error { return m }
+
+// StringMatcherValidationError is the validation error returned by
+// StringMatcher.Validate if the designated constraints aren't met.
+type StringMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StringMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StringMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StringMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StringMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StringMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStringMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StringMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StringMatcherValidationError{}
+
+// Validate checks the field values on ListStringMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ListStringMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListStringMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListStringMatcherMultiError, or nil if none found.
+func (m *ListStringMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListStringMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetPatterns()) < 1 {
+ err := ListStringMatcherValidationError{
+ field: "Patterns",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetPatterns() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListStringMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListStringMatcherMultiError is an error wrapping multiple validation errors
+// returned by ListStringMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type ListStringMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListStringMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListStringMatcherMultiError) AllErrors() []error { return m }
+
+// ListStringMatcherValidationError is the validation error returned by
+// ListStringMatcher.Validate if the designated constraints aren't met.
+type ListStringMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListStringMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListStringMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListStringMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListStringMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListStringMatcherValidationError) ErrorName() string {
+ return "ListStringMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListStringMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListStringMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListStringMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListStringMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
new file mode 100644
index 000000000..98c13d9b2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
@@ -0,0 +1,340 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/cel.proto
+
+package v3
+
+import (
+ expr "cel.dev/expr"
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CelExpression struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ExprSpecifier:
+ //
+ // *CelExpression_ParsedExpr
+ // *CelExpression_CheckedExpr
+ ExprSpecifier isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"`
+ CelExprParsed *expr.ParsedExpr `protobuf:"bytes,3,opt,name=cel_expr_parsed,json=celExprParsed,proto3" json:"cel_expr_parsed,omitempty"`
+ CelExprChecked *expr.CheckedExpr `protobuf:"bytes,4,opt,name=cel_expr_checked,json=celExprChecked,proto3" json:"cel_expr_checked,omitempty"`
+ CelExprString string `protobuf:"bytes,5,opt,name=cel_expr_string,json=celExprString,proto3" json:"cel_expr_string,omitempty"`
+}
+
+func (x *CelExpression) Reset() {
+ *x = CelExpression{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelExpression) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelExpression) ProtoMessage() {}
+
+func (x *CelExpression) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelExpression.ProtoReflect.Descriptor instead.
+func (*CelExpression) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *CelExpression) GetExprSpecifier() isCelExpression_ExprSpecifier {
+ if m != nil {
+ return m.ExprSpecifier
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr {
+ if x, ok := x.GetExprSpecifier().(*CelExpression_ParsedExpr); ok {
+ return x.ParsedExpr
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr {
+ if x, ok := x.GetExprSpecifier().(*CelExpression_CheckedExpr); ok {
+ return x.CheckedExpr
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprParsed() *expr.ParsedExpr {
+ if x != nil {
+ return x.CelExprParsed
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprChecked() *expr.CheckedExpr {
+ if x != nil {
+ return x.CelExprChecked
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprString() string {
+ if x != nil {
+ return x.CelExprString
+ }
+ return ""
+}
+
+type isCelExpression_ExprSpecifier interface {
+ isCelExpression_ExprSpecifier()
+}
+
+type CelExpression_ParsedExpr struct {
+ // Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+ ParsedExpr *v1alpha1.ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"`
+}
+
+type CelExpression_CheckedExpr struct {
+ // Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+ CheckedExpr *v1alpha1.CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"`
+}
+
+func (*CelExpression_ParsedExpr) isCelExpression_ExprSpecifier() {}
+
+func (*CelExpression_CheckedExpr) isCelExpression_ExprSpecifier() {}
+
+type CelExtractString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"`
+ DefaultValue *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+}
+
+func (x *CelExtractString) Reset() {
+ *x = CelExtractString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelExtractString) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelExtractString) ProtoMessage() {}
+
+func (x *CelExtractString) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelExtractString.ProtoReflect.Descriptor instead.
+func (*CelExtractString) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CelExtractString) GetExprExtract() *CelExpression {
+ if x != nil {
+ return x.ExprExtract
+ }
+ return nil
+}
+
+func (x *CelExtractString) GetDefaultValue() *wrapperspb.StringValue {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+var File_xds_type_v3_cel_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_cel_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65,
+ 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x63, 0x65, 0x6c,
+ 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a,
+ 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4b,
+ 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50,
+ 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52,
+ 0x0a, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0c, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3c, 0x0a, 0x0f, 0x63,
+ 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45,
+ 0x78, 0x70, 0x72, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x65, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x45,
+ 0x78, 0x70, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x65,
+ 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69,
+ 0x66, 0x69, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70,
+ 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65,
+ 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61,
+ 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a,
+ 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_cel_proto_rawDescOnce sync.Once
+ file_xds_type_v3_cel_proto_rawDescData = file_xds_type_v3_cel_proto_rawDesc
+)
+
+func file_xds_type_v3_cel_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_cel_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_cel_proto_rawDescData)
+ })
+ return file_xds_type_v3_cel_proto_rawDescData
+}
+
+var file_xds_type_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_v3_cel_proto_goTypes = []interface{}{
+ (*CelExpression)(nil), // 0: xds.type.v3.CelExpression
+ (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString
+ (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr
+ (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr
+ (*expr.ParsedExpr)(nil), // 4: cel.expr.ParsedExpr
+ (*expr.CheckedExpr)(nil), // 5: cel.expr.CheckedExpr
+ (*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue
+}
+var file_xds_type_v3_cel_proto_depIdxs = []int32{
+ 2, // 0: xds.type.v3.CelExpression.parsed_expr:type_name -> google.api.expr.v1alpha1.ParsedExpr
+ 3, // 1: xds.type.v3.CelExpression.checked_expr:type_name -> google.api.expr.v1alpha1.CheckedExpr
+ 4, // 2: xds.type.v3.CelExpression.cel_expr_parsed:type_name -> cel.expr.ParsedExpr
+ 5, // 3: xds.type.v3.CelExpression.cel_expr_checked:type_name -> cel.expr.CheckedExpr
+ 0, // 4: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression
+ 6, // 5: xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_cel_proto_init() }
+func file_xds_type_v3_cel_proto_init() {
+ if File_xds_type_v3_cel_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelExpression); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_cel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelExtractString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_v3_cel_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*CelExpression_ParsedExpr)(nil),
+ (*CelExpression_CheckedExpr)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_cel_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_cel_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_cel_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_cel_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_cel_proto = out.File
+ file_xds_type_v3_cel_proto_rawDesc = nil
+ file_xds_type_v3_cel_proto_goTypes = nil
+ file_xds_type_v3_cel_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
new file mode 100644
index 000000000..2643709be
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
@@ -0,0 +1,452 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/cel.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CelExpression with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CelExpression) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelExpression with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CelExpressionMultiError, or
+// nil if none found.
+func (m *CelExpression) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelExpression) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetCelExprParsed()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCelExprParsed()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetCelExprChecked()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCelExprChecked()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for CelExprString
+
+ switch v := m.ExprSpecifier.(type) {
+ case *CelExpression_ParsedExpr:
+ if v == nil {
+ err := CelExpressionValidationError{
+ field: "ExprSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetParsedExpr()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParsedExpr()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *CelExpression_CheckedExpr:
+ if v == nil {
+ err := CelExpressionValidationError{
+ field: "ExprSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetCheckedExpr()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCheckedExpr()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return CelExpressionMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelExpressionMultiError is an error wrapping multiple validation errors
+// returned by CelExpression.ValidateAll() if the designated constraints
+// aren't met.
+type CelExpressionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelExpressionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelExpressionMultiError) AllErrors() []error { return m }
+
+// CelExpressionValidationError is the validation error returned by
+// CelExpression.Validate if the designated constraints aren't met.
+type CelExpressionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelExpressionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelExpressionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelExpressionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelExpressionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelExpressionValidationError) ErrorName() string { return "CelExpressionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelExpressionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelExpression.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelExpressionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelExpressionValidationError{}
+
+// Validate checks the field values on CelExtractString with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CelExtractString) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelExtractString with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CelExtractStringMultiError, or nil if none found.
+func (m *CelExtractString) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelExtractString) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetExprExtract() == nil {
+ err := CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExprExtract()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExprExtract()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CelExtractStringMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelExtractStringMultiError is an error wrapping multiple validation errors
+// returned by CelExtractString.ValidateAll() if the designated constraints
+// aren't met.
+type CelExtractStringMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelExtractStringMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelExtractStringMultiError) AllErrors() []error { return m }
+
+// CelExtractStringValidationError is the validation error returned by
+// CelExtractString.Validate if the designated constraints aren't met.
+type CelExtractStringValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelExtractStringValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelExtractStringValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelExtractStringValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelExtractStringValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelExtractStringValidationError) ErrorName() string { return "CelExtractStringValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelExtractStringValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelExtractString.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelExtractStringValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelExtractStringValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
new file mode 100644
index 000000000..c6f8bb9ba
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
@@ -0,0 +1,298 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/range.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Int64Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int64Range) Reset() {
+ *x = Int64Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Range) ProtoMessage() {}
+
+func (x *Int64Range) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead.
+func (*Int64Range) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Int64Range) GetStart() int64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int64Range) GetEnd() int64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+type Int32Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int32Range) Reset() {
+ *x = Int32Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Range) ProtoMessage() {}
+
+func (x *Int32Range) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead.
+func (*Int32Range) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Int32Range) GetStart() int32 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int32Range) GetEnd() int32 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+type DoubleRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"`
+ End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *DoubleRange) Reset() {
+ *x = DoubleRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRange) ProtoMessage() {}
+
+func (x *DoubleRange) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead.
+func (*DoubleRange) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRange) GetStart() float64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *DoubleRange) GetEnd() float64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+var File_xds_type_v3_range_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_range_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x0a,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x4a, 0x0a, 0x16, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_range_proto_rawDescOnce sync.Once
+ file_xds_type_v3_range_proto_rawDescData = file_xds_type_v3_range_proto_rawDesc
+)
+
+func file_xds_type_v3_range_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_range_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_range_proto_rawDescData)
+ })
+ return file_xds_type_v3_range_proto_rawDescData
+}
+
+var file_xds_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_xds_type_v3_range_proto_goTypes = []interface{}{
+ (*Int64Range)(nil), // 0: xds.type.v3.Int64Range
+ (*Int32Range)(nil), // 1: xds.type.v3.Int32Range
+ (*DoubleRange)(nil), // 2: xds.type.v3.DoubleRange
+}
+var file_xds_type_v3_range_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_range_proto_init() }
+func file_xds_type_v3_range_proto_init() {
+ if File_xds_type_v3_range_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_range_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_range_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_range_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_range_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_range_proto = out.File
+ file_xds_type_v3_range_proto_rawDesc = nil
+ file_xds_type_v3_range_proto_goTypes = nil
+ file_xds_type_v3_range_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go
new file mode 100644
index 000000000..ccaf418e5
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go
@@ -0,0 +1,345 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/range.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Int64Range with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Int64Range) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64Range with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in Int64RangeMultiError, or
+// nil if none found.
+func (m *Int64Range) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64Range) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return Int64RangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMultiError is an error wrapping multiple validation errors
+// returned by Int64Range.ValidateAll() if the designated constraints aren't met.
+type Int64RangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMultiError) AllErrors() []error { return m }
+
+// Int64RangeValidationError is the validation error returned by
+// Int64Range.Validate if the designated constraints aren't met.
+type Int64RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int64RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeValidationError{}
+
+// Validate checks the field values on Int32Range with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Int32Range) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32Range with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in Int32RangeMultiError, or
+// nil if none found.
+func (m *Int32Range) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32Range) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return Int32RangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMultiError is an error wrapping multiple validation errors
+// returned by Int32Range.ValidateAll() if the designated constraints aren't met.
+type Int32RangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMultiError) AllErrors() []error { return m }
+
+// Int32RangeValidationError is the validation error returned by
+// Int32Range.Validate if the designated constraints aren't met.
+type Int32RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int32RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeValidationError{}
+
+// Validate checks the field values on DoubleRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *DoubleRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in DoubleRangeMultiError, or
+// nil if none found.
+func (m *DoubleRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRange) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return DoubleRangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMultiError is an error wrapping multiple validation errors
+// returned by DoubleRange.ValidateAll() if the designated constraints aren't met.
+type DoubleRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMultiError) AllErrors() []error { return m }
+
+// DoubleRangeValidationError is the validation error returned by
+// DoubleRange.Validate if the designated constraints aren't met.
+type DoubleRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
new file mode 100644
index 000000000..ba42cb0e8
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
@@ -0,0 +1,163 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/typed_struct.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *TypedStruct) Reset() {
+ *x = TypedStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedStruct) ProtoMessage() {}
+
+func (x *TypedStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead.
+func (*TypedStruct) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_typed_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedStruct) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *TypedStruct) GetValue() *structpb.Struct {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_xds_type_v3_typed_struct_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_typed_struct_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73,
+ 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0b, 0x54,
+ 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79,
+ 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79,
+ 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10,
+ 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_typed_struct_proto_rawDescOnce sync.Once
+ file_xds_type_v3_typed_struct_proto_rawDescData = file_xds_type_v3_typed_struct_proto_rawDesc
+)
+
+func file_xds_type_v3_typed_struct_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_typed_struct_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_typed_struct_proto_rawDescData)
+ })
+ return file_xds_type_v3_typed_struct_proto_rawDescData
+}
+
+var file_xds_type_v3_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_v3_typed_struct_proto_goTypes = []interface{}{
+ (*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct
+ (*structpb.Struct)(nil), // 1: google.protobuf.Struct
+}
+var file_xds_type_v3_typed_struct_proto_depIdxs = []int32{
+ 1, // 0: xds.type.v3.TypedStruct.value:type_name -> google.protobuf.Struct
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_typed_struct_proto_init() }
+func file_xds_type_v3_typed_struct_proto_init() {
+ if File_xds_type_v3_typed_struct_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_typed_struct_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_typed_struct_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_typed_struct_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_typed_struct_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_typed_struct_proto = out.File
+ file_xds_type_v3_typed_struct_proto_rawDesc = nil
+ file_xds_type_v3_typed_struct_proto_goTypes = nil
+ file_xds_type_v3_typed_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go
new file mode 100644
index 000000000..f39bce906
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go
@@ -0,0 +1,166 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/typed_struct.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TypedStruct) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TypedStructMultiError, or
+// nil if none found.
+func (m *TypedStruct) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedStruct) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TypeUrl
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TypedStructMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedStructMultiError is an error wrapping multiple validation errors
+// returned by TypedStruct.ValidateAll() if the designated constraints aren't met.
+type TypedStructMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedStructMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedStructMultiError) AllErrors() []error { return m }
+
+// TypedStructValidationError is the validation error returned by
+// TypedStruct.Validate if the designated constraints aren't met.
+type TypedStructValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedStructValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedStructValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedStructValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedStructValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TypedStructValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedStruct.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedStructValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedStructValidationError{}
diff --git a/vendor/github.com/emicklei/go-restful/v3/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml
deleted file mode 100644
index 3a0bf5ff1..000000000
--- a/vendor/github.com/emicklei/go-restful/v3/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - 1.x
-
-before_install:
- - go test -v
-
-script:
- - go test -race -coverprofile=coverage.txt -covermode=atomic
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 92b78048e..4fcd920ab 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,12 @@
# Change history of go-restful
+## [v3.13.0] - 2025-08-14
+
+- optimize performance of path matching in CurlyRouter ( thanks @wenhuang, Wen Huang)
+
+## [v3.12.2] - 2025-02-21
+
+- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
## [v3.12.1] - 2024-05-28
@@ -18,7 +25,7 @@
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
-## [v3.12.0] - 2023-08-19
+## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 7234604e4..50a79ab69 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -3,7 +3,7 @@ go-restful
package for building REST-style Web Services using Google Go
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[](https://pkg.go.dev/github.com/emicklei/go-restful)
+[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
[](https://codecov.io/gh/emicklei/go-restful)
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
@@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration
- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function
+- Added `SetPathTokenCacheEnabled` and `SetCustomVerbCacheEnabled` to disable regexp caching (default=true)
## How to customize
There are several hooks to customize the behavior of the go-restful package.
diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go
index 6fd2bcd5a..eec43bfd0 100644
--- a/vendor/github.com/emicklei/go-restful/v3/curly.go
+++ b/vendor/github.com/emicklei/go-restful/v3/curly.go
@@ -9,11 +9,35 @@ import (
"regexp"
"sort"
"strings"
+ "sync"
)
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
type CurlyRouter struct{}
+var (
+ regexCache sync.Map // Cache for compiled regex patterns
+ pathTokenCacheEnabled = true // Enable/disable path token regex caching
+)
+
+// SetPathTokenCacheEnabled enables or disables path token regex caching for CurlyRouter.
+// When disabled, regex patterns will be compiled on every request.
+// When enabled (default), compiled regex patterns are cached for better performance.
+func SetPathTokenCacheEnabled(enabled bool) {
+ pathTokenCacheEnabled = enabled
+}
+
+// getCachedRegexp retrieves a compiled regex from the cache if found and valid.
+// Returns the regex and true if found and valid, nil and false otherwise.
+func getCachedRegexp(cache *sync.Map, pattern string) (*regexp.Regexp, bool) {
+ if cached, found := cache.Load(pattern); found {
+ if regex, ok := cached.(*regexp.Regexp); ok {
+ return regex, true
+ }
+ }
+ return nil, false
+}
+
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (c CurlyRouter) SelectRoute(
@@ -113,8 +137,28 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque
}
return true, true
}
- matched, err := regexp.MatchString(regPart, requestToken)
- return (matched && err == nil), false
+
+ // Check cache first (if enabled)
+ if pathTokenCacheEnabled {
+ if regex, found := getCachedRegexp(®exCache, regPart); found {
+ matched := regex.MatchString(requestToken)
+ return matched, false
+ }
+ }
+
+ // Compile the regex
+ regex, err := regexp.Compile(regPart)
+ if err != nil {
+ return false, false
+ }
+
+ // Cache the regex (if enabled)
+ if pathTokenCacheEnabled {
+ regexCache.Store(regPart, regex)
+ }
+
+ matched := regex.MatchString(requestToken)
+ return matched, false
}
var jsr311Router = RouterJSR311{}
@@ -168,7 +212,7 @@ func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens
if matchesToken {
score++ // extra score for regex match
}
- }
+ }
} else {
// not a parameter
if eachRequestToken != eachRouteToken {
diff --git a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go
index bfc17efde..0b98eeb09 100644
--- a/vendor/github.com/emicklei/go-restful/v3/custom_verb.go
+++ b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go
@@ -1,14 +1,28 @@
package restful
+// Copyright 2025 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
import (
"fmt"
"regexp"
+ "sync"
)
var (
- customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
+ customVerbReg = regexp.MustCompile(":([A-Za-z]+)$")
+ customVerbCache sync.Map // Cache for compiled custom verb regexes
+ customVerbCacheEnabled = true // Enable/disable custom verb regex caching
)
+// SetCustomVerbCacheEnabled enables or disables custom verb regex caching.
+// When disabled, custom verb regex patterns will be compiled on every request.
+// When enabled (default), compiled custom verb regex patterns are cached for better performance.
+func SetCustomVerbCacheEnabled(enabled bool) {
+ customVerbCacheEnabled = enabled
+}
+
func hasCustomVerb(routeToken string) bool {
return customVerbReg.MatchString(routeToken)
}
@@ -20,7 +34,23 @@ func isMatchCustomVerb(routeToken string, pathToken string) bool {
}
customVerb := rs[1]
- specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb))
+ regexPattern := fmt.Sprintf(":%s$", customVerb)
+
+ // Check cache first (if enabled)
+ if customVerbCacheEnabled {
+ if specificVerbReg, found := getCachedRegexp(&customVerbCache, regexPattern); found {
+ return specificVerbReg.MatchString(pathToken)
+ }
+ }
+
+ // Compile the regex
+ specificVerbReg := regexp.MustCompile(regexPattern)
+
+ // Cache the regex (if enabled)
+ if customVerbCacheEnabled {
+ customVerbCache.Store(regexPattern, specificVerbReg)
+ }
+
return specificVerbReg.MatchString(pathToken)
}
diff --git a/vendor/github.com/emicklei/go-restful/v3/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go
index 69b13057d..80809225b 100644
--- a/vendor/github.com/emicklei/go-restful/v3/doc.go
+++ b/vendor/github.com/emicklei/go-restful/v3/doc.go
@@ -1,7 +1,7 @@
/*
Package restful , a lean package for creating REST-style WebServices without magic.
-WebServices and Routes
+### WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
@@ -30,14 +30,14 @@ The (*Request, *Response) arguments provide functions for reading information fr
See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation.
-Regular expression matching Routes
+### Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
-Containers
+### Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
@@ -47,7 +47,7 @@ You can create your own Container and create a new http.Server for that particul
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
-Filters
+### Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
@@ -60,22 +60,21 @@ Use the following statement to pass the request,response pair to the next filter
chain.ProcessFilter(req, resp)
-Container Filters
+### Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
-WebService Filters
+### WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
-
-Route Filters
+### Route Filters
These are processed before calling the function associated with the Route.
@@ -84,7 +83,7 @@ These are processed before calling the function associated with the Route.
See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations.
-Response Encoding
+### Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
@@ -95,20 +94,20 @@ Alternatively, you can create a Filter that performs the encoding and install it
See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go
-OPTIONS support
+### OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
-CORS
+### CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
-Error Handling
+### Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
@@ -137,11 +136,11 @@ The request does not have or has an unknown Accept Header set for this operation
The request does not have or has an unknown Content-Type Header set for this operation.
-ServiceError
+### ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
-Performance options
+### Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
@@ -156,30 +155,27 @@ Default value is true
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
-Trouble shooting
+### Trouble shooting
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
-Logging
+### Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.
-Resources
+### Resources
-[project]: https://github.com/emicklei/go-restful
+(c) 2012-2025, http://ernestmicklei.com. MIT License
+[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
-
-[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
-
+[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
-
-(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index a9b3faaa8..7f04bd905 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
return params
}
-// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
candidates := make([]*Route, 0, 8)
for i, each := range routes {
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
}
- if httpRequest.ContentLength > 0 {
- return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
- }
+ return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
for _, candidate := range previous {
available = append(available, candidate.Produces...)
}
- // if POST,PUT,PATCH without body
- method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
- if (method == http.MethodPost ||
- method == http.MethodPut ||
- method == http.MethodPatch) && (length == "" || length == "0") {
- return nil, NewError(
- http.StatusUnsupportedMediaType,
- fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
- }
return nil, NewError(
http.StatusNotAcceptable,
- fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
- )
+ fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return candidates[0], nil
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 306c44be7..a2056e2ac 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+// If the route does not specify Consumes then return true (*/*).
+// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE b/vendor/github.com/envoyproxy/go-control-plane/envoy/LICENSE
similarity index 100%
rename from vendor/sigs.k8s.io/structured-merge-diff/v4/LICENSE
rename to vendor/github.com/envoyproxy/go-control-plane/envoy/LICENSE
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
new file mode 100644
index 000000000..b2872bfb3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
@@ -0,0 +1,607 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of certificate details. Admin endpoint uses this wrapper for “/certs“ to
+// display certificate information. See :ref:`/certs ` for more
+// information.
+type Certificates struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of certificates known to an Envoy.
+ Certificates []*Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"`
+}
+
+func (x *Certificates) Reset() {
+ *x = Certificates{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificates) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificates) ProtoMessage() {}
+
+func (x *Certificates) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificates.ProtoReflect.Descriptor instead.
+func (*Certificates) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Certificates) GetCertificates() []*Certificate {
+ if x != nil {
+ return x.Certificates
+ }
+ return nil
+}
+
+type Certificate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Details of CA certificate.
+ CaCert []*CertificateDetails `protobuf:"bytes,1,rep,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // Details of Certificate Chain
+ CertChain []*CertificateDetails `protobuf:"bytes,2,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+}
+
+func (x *Certificate) Reset() {
+ *x = Certificate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificate) ProtoMessage() {}
+
+func (x *Certificate) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
+func (*Certificate) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Certificate) GetCaCert() []*CertificateDetails {
+ if x != nil {
+ return x.CaCert
+ }
+ return nil
+}
+
+func (x *Certificate) GetCertChain() []*CertificateDetails {
+ if x != nil {
+ return x.CertChain
+ }
+ return nil
+}
+
+// [#next-free-field: 8]
+type CertificateDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path of the certificate.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // Certificate Serial Number.
+ SerialNumber string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+ // List of Subject Alternate names.
+ SubjectAltNames []*SubjectAlternateName `protobuf:"bytes,3,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"`
+ // Minimum of days until expiration of certificate and it's chain.
+ DaysUntilExpiration uint64 `protobuf:"varint,4,opt,name=days_until_expiration,json=daysUntilExpiration,proto3" json:"days_until_expiration,omitempty"`
+ // Indicates the time from which the certificate is valid.
+ ValidFrom *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+ // Indicates the time at which the certificate expires.
+ ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"`
+ // Details related to the OCSP response associated with this certificate, if any.
+ OcspDetails *CertificateDetails_OcspDetails `protobuf:"bytes,7,opt,name=ocsp_details,json=ocspDetails,proto3" json:"ocsp_details,omitempty"`
+}
+
+func (x *CertificateDetails) Reset() {
+ *x = CertificateDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateDetails) ProtoMessage() {}
+
+func (x *CertificateDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateDetails.ProtoReflect.Descriptor instead.
+func (*CertificateDetails) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CertificateDetails) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *CertificateDetails) GetSerialNumber() string {
+ if x != nil {
+ return x.SerialNumber
+ }
+ return ""
+}
+
+func (x *CertificateDetails) GetSubjectAltNames() []*SubjectAlternateName {
+ if x != nil {
+ return x.SubjectAltNames
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetDaysUntilExpiration() uint64 {
+ if x != nil {
+ return x.DaysUntilExpiration
+ }
+ return 0
+}
+
+func (x *CertificateDetails) GetValidFrom() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ValidFrom
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetExpirationTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpirationTime
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetOcspDetails() *CertificateDetails_OcspDetails {
+ if x != nil {
+ return x.OcspDetails
+ }
+ return nil
+}
+
+type SubjectAlternateName struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Subject Alternate Name.
+ //
+ // Types that are assignable to Name:
+ //
+ // *SubjectAlternateName_Dns
+ // *SubjectAlternateName_Uri
+ // *SubjectAlternateName_IpAddress
+ Name isSubjectAlternateName_Name `protobuf_oneof:"name"`
+}
+
+func (x *SubjectAlternateName) Reset() {
+ *x = SubjectAlternateName{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubjectAlternateName) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubjectAlternateName) ProtoMessage() {}
+
+func (x *SubjectAlternateName) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubjectAlternateName.ProtoReflect.Descriptor instead.
+func (*SubjectAlternateName) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{3}
+}
+
+func (m *SubjectAlternateName) GetName() isSubjectAlternateName_Name {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (x *SubjectAlternateName) GetDns() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_Dns); ok {
+ return x.Dns
+ }
+ return ""
+}
+
+func (x *SubjectAlternateName) GetUri() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_Uri); ok {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *SubjectAlternateName) GetIpAddress() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_IpAddress); ok {
+ return x.IpAddress
+ }
+ return ""
+}
+
+type isSubjectAlternateName_Name interface {
+ isSubjectAlternateName_Name()
+}
+
+type SubjectAlternateName_Dns struct {
+ Dns string `protobuf:"bytes,1,opt,name=dns,proto3,oneof"`
+}
+
+type SubjectAlternateName_Uri struct {
+ Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
+}
+
+type SubjectAlternateName_IpAddress struct {
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3,oneof"`
+}
+
+func (*SubjectAlternateName_Dns) isSubjectAlternateName_Name() {}
+
+func (*SubjectAlternateName_Uri) isSubjectAlternateName_Name() {}
+
+func (*SubjectAlternateName_IpAddress) isSubjectAlternateName_Name() {}
+
+type CertificateDetails_OcspDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Indicates the time from which the OCSP response is valid.
+ ValidFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+ // Indicates the time at which the OCSP response expires.
+ Expiration *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"`
+}
+
+func (x *CertificateDetails_OcspDetails) Reset() {
+ *x = CertificateDetails_OcspDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateDetails_OcspDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateDetails_OcspDetails) ProtoMessage() {}
+
+func (x *CertificateDetails_OcspDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateDetails_OcspDetails.ProtoReflect.Descriptor instead.
+func (*CertificateDetails_OcspDetails) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ValidFrom
+ }
+ return nil
+}
+
+func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Expiration
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_certs_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_certs_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x78, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12,
+ 0x3f, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0b, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f,
+ 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x06,
+ 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63,
+ 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x09,
+ 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21,
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x22, 0xdc, 0x04, 0x0a, 0x12, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d,
+ 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x75, 0x6e, 0x74, 0x69,
+ 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x45, 0x78, 0x70,
+ 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72,
+ 0x6f, 0x6d, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x5f,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x73, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0b, 0x6f,
+ 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0b, 0x4f,
+ 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x98, 0x01, 0x0a, 0x14, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x12, 0x0a,
+ 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72,
+ 0x69, 0x12, 0x1f, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53,
+ 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x73, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x43, 0x65, 0x72, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_certs_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_certs_proto_rawDescData = file_envoy_admin_v3_certs_proto_rawDesc
+)
+
+func file_envoy_admin_v3_certs_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_certs_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_certs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_certs_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_certs_proto_rawDescData
+}
+
+var file_envoy_admin_v3_certs_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_envoy_admin_v3_certs_proto_goTypes = []interface{}{
+ (*Certificates)(nil), // 0: envoy.admin.v3.Certificates
+ (*Certificate)(nil), // 1: envoy.admin.v3.Certificate
+ (*CertificateDetails)(nil), // 2: envoy.admin.v3.CertificateDetails
+ (*SubjectAlternateName)(nil), // 3: envoy.admin.v3.SubjectAlternateName
+ (*CertificateDetails_OcspDetails)(nil), // 4: envoy.admin.v3.CertificateDetails.OcspDetails
+ (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
+}
+var file_envoy_admin_v3_certs_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Certificates.certificates:type_name -> envoy.admin.v3.Certificate
+ 2, // 1: envoy.admin.v3.Certificate.ca_cert:type_name -> envoy.admin.v3.CertificateDetails
+ 2, // 2: envoy.admin.v3.Certificate.cert_chain:type_name -> envoy.admin.v3.CertificateDetails
+ 3, // 3: envoy.admin.v3.CertificateDetails.subject_alt_names:type_name -> envoy.admin.v3.SubjectAlternateName
+ 5, // 4: envoy.admin.v3.CertificateDetails.valid_from:type_name -> google.protobuf.Timestamp
+ 5, // 5: envoy.admin.v3.CertificateDetails.expiration_time:type_name -> google.protobuf.Timestamp
+ 4, // 6: envoy.admin.v3.CertificateDetails.ocsp_details:type_name -> envoy.admin.v3.CertificateDetails.OcspDetails
+ 5, // 7: envoy.admin.v3.CertificateDetails.OcspDetails.valid_from:type_name -> google.protobuf.Timestamp
+ 5, // 8: envoy.admin.v3.CertificateDetails.OcspDetails.expiration:type_name -> google.protobuf.Timestamp
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_certs_proto_init() }
+func file_envoy_admin_v3_certs_proto_init() {
+ if File_envoy_admin_v3_certs_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_certs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificates); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubjectAlternateName); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateDetails_OcspDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*SubjectAlternateName_Dns)(nil),
+ (*SubjectAlternateName_Uri)(nil),
+ (*SubjectAlternateName_IpAddress)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_certs_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_certs_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_certs_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_certs_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_certs_proto = out.File
+ file_envoy_admin_v3_certs_proto_rawDesc = nil
+ file_envoy_admin_v3_certs_proto_goTypes = nil
+ file_envoy_admin_v3_certs_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go
new file mode 100644
index 000000000..413895689
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go
@@ -0,0 +1,870 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Certificates with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Certificates) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Certificates with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CertificatesMultiError, or
+// nil if none found.
+func (m *Certificates) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Certificates) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetCertificates() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CertificatesMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificatesMultiError is an error wrapping multiple validation errors
+// returned by Certificates.ValidateAll() if the designated constraints aren't met.
+type CertificatesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificatesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificatesMultiError) AllErrors() []error { return m }
+
+// CertificatesValidationError is the validation error returned by
+// Certificates.Validate if the designated constraints aren't met.
+type CertificatesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificatesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificatesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificatesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificatesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificatesValidationError) ErrorName() string { return "CertificatesValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CertificatesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificates.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificatesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificatesValidationError{}
+
+// Validate checks the field values on Certificate with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Certificate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Certificate with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CertificateMultiError, or
+// nil if none found.
+func (m *Certificate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Certificate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetCaCert() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetCertChain() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CertificateMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateMultiError is an error wrapping multiple validation errors
+// returned by Certificate.ValidateAll() if the designated constraints aren't met.
+type CertificateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateMultiError) AllErrors() []error { return m }
+
+// CertificateValidationError is the validation error returned by
+// Certificate.Validate if the designated constraints aren't met.
+type CertificateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateValidationError) ErrorName() string { return "CertificateValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CertificateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateValidationError{}
+
+// Validate checks the field values on CertificateDetails with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CertificateDetails) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CertificateDetails with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CertificateDetailsMultiError, or nil if none found.
+func (m *CertificateDetails) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CertificateDetails) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Path
+
+ // no validation rules for SerialNumber
+
+ for idx, item := range m.GetSubjectAltNames() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for DaysUntilExpiration
+
+ if all {
+ switch v := interface{}(m.GetValidFrom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetExpirationTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExpirationTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetOcspDetails()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOcspDetails()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CertificateDetailsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateDetailsMultiError is an error wrapping multiple validation errors
+// returned by CertificateDetails.ValidateAll() if the designated constraints
+// aren't met.
+type CertificateDetailsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateDetailsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateDetailsMultiError) AllErrors() []error { return m }
+
+// CertificateDetailsValidationError is the validation error returned by
+// CertificateDetails.Validate if the designated constraints aren't met.
+type CertificateDetailsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateDetailsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateDetailsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateDetailsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateDetailsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateDetailsValidationError) ErrorName() string {
+ return "CertificateDetailsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CertificateDetailsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificateDetails.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateDetailsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateDetailsValidationError{}
+
+// Validate checks the field values on SubjectAlternateName with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SubjectAlternateName) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SubjectAlternateName with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SubjectAlternateNameMultiError, or nil if none found.
+func (m *SubjectAlternateName) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SubjectAlternateName) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ switch v := m.Name.(type) {
+ case *SubjectAlternateName_Dns:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for Dns
+ case *SubjectAlternateName_Uri:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for Uri
+ case *SubjectAlternateName_IpAddress:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for IpAddress
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return SubjectAlternateNameMultiError(errors)
+ }
+
+ return nil
+}
+
+// SubjectAlternateNameMultiError is an error wrapping multiple validation
+// errors returned by SubjectAlternateName.ValidateAll() if the designated
+// constraints aren't met.
+type SubjectAlternateNameMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SubjectAlternateNameMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SubjectAlternateNameMultiError) AllErrors() []error { return m }
+
+// SubjectAlternateNameValidationError is the validation error returned by
+// SubjectAlternateName.Validate if the designated constraints aren't met.
+type SubjectAlternateNameValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SubjectAlternateNameValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SubjectAlternateNameValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SubjectAlternateNameValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SubjectAlternateNameValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SubjectAlternateNameValidationError) ErrorName() string {
+ return "SubjectAlternateNameValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SubjectAlternateNameValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSubjectAlternateName.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SubjectAlternateNameValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SubjectAlternateNameValidationError{}
+
+// Validate checks the field values on CertificateDetails_OcspDetails with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CertificateDetails_OcspDetails) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CertificateDetails_OcspDetails with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// CertificateDetails_OcspDetailsMultiError, or nil if none found.
+func (m *CertificateDetails_OcspDetails) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CertificateDetails_OcspDetails) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetValidFrom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetExpiration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExpiration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CertificateDetails_OcspDetailsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateDetails_OcspDetailsMultiError is an error wrapping multiple
+// validation errors returned by CertificateDetails_OcspDetails.ValidateAll()
+// if the designated constraints aren't met.
+type CertificateDetails_OcspDetailsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateDetails_OcspDetailsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateDetails_OcspDetailsMultiError) AllErrors() []error { return m }
+
+// CertificateDetails_OcspDetailsValidationError is the validation error
+// returned by CertificateDetails_OcspDetails.Validate if the designated
+// constraints aren't met.
+type CertificateDetails_OcspDetailsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateDetails_OcspDetailsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateDetails_OcspDetailsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateDetails_OcspDetailsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateDetails_OcspDetailsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateDetails_OcspDetailsValidationError) ErrorName() string {
+ return "CertificateDetails_OcspDetailsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CertificateDetails_OcspDetailsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificateDetails_OcspDetails.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateDetails_OcspDetailsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateDetails_OcspDetailsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go
new file mode 100644
index 000000000..3c325787d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go
@@ -0,0 +1,504 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Certificates) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Certificates) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Certificates) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Certificates) > 0 {
+ for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Certificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Certificate) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Certificate) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Certificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.CertChain) > 0 {
+ for iNdEx := len(m.CertChain) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.CertChain[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.CaCert) > 0 {
+ for iNdEx := len(m.CaCert) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.CaCert[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Expiration != nil {
+ size, err := (*timestamppb.Timestamp)(m.Expiration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ValidFrom != nil {
+ size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateDetails) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateDetails) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CertificateDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.OcspDetails != nil {
+ size, err := m.OcspDetails.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.ExpirationTime != nil {
+ size, err := (*timestamppb.Timestamp)(m.ExpirationTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.ValidFrom != nil {
+ size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DaysUntilExpiration != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DaysUntilExpiration))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.SubjectAltNames) > 0 {
+ for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.SubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.SerialNumber) > 0 {
+ i -= len(m.SerialNumber)
+ copy(dAtA[i:], m.SerialNumber)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SerialNumber)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAlternateName) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectAlternateName) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_IpAddress); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_Uri); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_Dns); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAlternateName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.Dns)
+ copy(dAtA[i:], m.Dns)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+func (m *SubjectAlternateName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.Uri)
+ copy(dAtA[i:], m.Uri)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
+}
+func (m *SubjectAlternateName_IpAddress) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_IpAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.IpAddress)
+ copy(dAtA[i:], m.IpAddress)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.IpAddress)))
+ i--
+ dAtA[i] = 0x1a
+ return len(dAtA) - i, nil
+}
+func (m *Certificates) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Certificates) > 0 {
+ for _, e := range m.Certificates {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Certificate) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.CaCert) > 0 {
+ for _, e := range m.CaCert {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.CertChain) > 0 {
+ for _, e := range m.CertChain {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CertificateDetails_OcspDetails) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ValidFrom != nil {
+ l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Expiration != nil {
+ l = (*timestamppb.Timestamp)(m.Expiration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CertificateDetails) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.SerialNumber)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.SubjectAltNames) > 0 {
+ for _, e := range m.SubjectAltNames {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.DaysUntilExpiration != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.DaysUntilExpiration))
+ }
+ if m.ValidFrom != nil {
+ l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ExpirationTime != nil {
+ l = (*timestamppb.Timestamp)(m.ExpirationTime).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OcspDetails != nil {
+ l = m.OcspDetails.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SubjectAlternateName) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.Name.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SubjectAlternateName_Dns) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Dns)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *SubjectAlternateName_Uri) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Uri)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *SubjectAlternateName_IpAddress) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IpAddress)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go
new file mode 100644
index 000000000..ee2239572
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go
@@ -0,0 +1,744 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Admin endpoint uses this wrapper for “/clusters“ to display cluster status information.
+// See :ref:`/clusters ` for more information.
+type Clusters struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Mapping from cluster name to each cluster's status.
+ ClusterStatuses []*ClusterStatus `protobuf:"bytes,1,rep,name=cluster_statuses,json=clusterStatuses,proto3" json:"cluster_statuses,omitempty"`
+}
+
+func (x *Clusters) Reset() {
+ *x = Clusters{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Clusters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Clusters) ProtoMessage() {}
+
+func (x *Clusters) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Clusters.ProtoReflect.Descriptor instead.
+func (*Clusters) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Clusters) GetClusterStatuses() []*ClusterStatus {
+ if x != nil {
+ return x.ClusterStatuses
+ }
+ return nil
+}
+
+// Details an individual cluster's current status.
+// [#next-free-field: 9]
+type ClusterStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the cluster.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Denotes whether this cluster was added via API or configured statically.
+ AddedViaApi bool `protobuf:"varint,2,opt,name=added_via_api,json=addedViaApi,proto3" json:"added_via_api,omitempty"`
+ // The success rate threshold used in the last interval.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “false“, all errors: externally and locally generated were used to calculate the threshold.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“, only externally generated errors were used to calculate the threshold.
+ // The threshold is used to eject hosts based on their success rate. See
+ // :ref:`Cluster outlier detection ` documentation for details.
+ //
+ // Note: this field may be omitted in any of the three following cases:
+ //
+ // 1. There were not enough hosts with enough request volume to proceed with success rate based
+ // outlier ejection.
+ // 2. The threshold is computed to be < 0 because a negative value implies that there was no
+ // threshold for that interval.
+ // 3. Outlier detection is not enabled for this cluster.
+ SuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,3,opt,name=success_rate_ejection_threshold,json=successRateEjectionThreshold,proto3" json:"success_rate_ejection_threshold,omitempty"`
+ // Mapping from host address to the host's current status.
+ HostStatuses []*HostStatus `protobuf:"bytes,4,rep,name=host_statuses,json=hostStatuses,proto3" json:"host_statuses,omitempty"`
+ // The success rate threshold used in the last interval when only locally originated failures were
+ // taken into account and externally originated errors were treated as success.
+ // This field should be interpreted only when
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“. The threshold is used to eject hosts based on their success rate.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: this field may be omitted in any of the three following cases:
+ //
+ // 1. There were not enough hosts with enough request volume to proceed with success rate based
+ // outlier ejection.
+ // 2. The threshold is computed to be < 0 because a negative value implies that there was no
+ // threshold for that interval.
+ // 3. Outlier detection is not enabled for this cluster.
+ LocalOriginSuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,5,opt,name=local_origin_success_rate_ejection_threshold,json=localOriginSuccessRateEjectionThreshold,proto3" json:"local_origin_success_rate_ejection_threshold,omitempty"`
+ // :ref:`Circuit breaking ` settings of the cluster.
+ CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"`
+ // Observability name of the cluster.
+ ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"`
+ // The :ref:`EDS service name ` if the cluster is an EDS cluster.
+ EdsServiceName string `protobuf:"bytes,8,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"`
+}
+
+func (x *ClusterStatus) Reset() {
+ *x = ClusterStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterStatus) ProtoMessage() {}
+
+func (x *ClusterStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead.
+func (*ClusterStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClusterStatus) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ClusterStatus) GetAddedViaApi() bool {
+ if x != nil {
+ return x.AddedViaApi
+ }
+ return false
+}
+
+func (x *ClusterStatus) GetSuccessRateEjectionThreshold() *v3.Percent {
+ if x != nil {
+ return x.SuccessRateEjectionThreshold
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetHostStatuses() []*HostStatus {
+ if x != nil {
+ return x.HostStatuses
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetLocalOriginSuccessRateEjectionThreshold() *v3.Percent {
+ if x != nil {
+ return x.LocalOriginSuccessRateEjectionThreshold
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetCircuitBreakers() *v31.CircuitBreakers {
+ if x != nil {
+ return x.CircuitBreakers
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetObservabilityName() string {
+ if x != nil {
+ return x.ObservabilityName
+ }
+ return ""
+}
+
+func (x *ClusterStatus) GetEdsServiceName() string {
+ if x != nil {
+ return x.EdsServiceName
+ }
+ return ""
+}
+
+// Current state of a particular host.
+// [#next-free-field: 10]
+type HostStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Address of this host.
+ Address *v32.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ // List of stats specific to this host.
+ Stats []*SimpleMetric `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty"`
+ // The host's current health status.
+ HealthStatus *HostHealthStatus `protobuf:"bytes,3,opt,name=health_status,json=healthStatus,proto3" json:"health_status,omitempty"`
+ // Request success rate for this host over the last calculated interval.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “false“, all errors: externally and locally generated were used in success rate
+ // calculation. If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“, only externally generated errors were used in success rate calculation.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: the message will not be present if host did not have enough request volume to calculate
+ // success rate or the cluster did not have enough hosts to run through success rate outlier
+ // ejection.
+ SuccessRate *v3.Percent `protobuf:"bytes,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"`
+ // The host's weight. If not configured, the value defaults to 1.
+ Weight uint32 `protobuf:"varint,5,opt,name=weight,proto3" json:"weight,omitempty"`
+ // The hostname of the host, if applicable.
+ Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // The host's priority. If not configured, the value defaults to 0 (highest priority).
+ Priority uint32 `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"`
+ // Request success rate for this host over the last calculated
+ // interval when only locally originated errors are taken into account and externally originated
+ // errors were treated as success.
+ // This field should be interpreted only when
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: the message will not be present if host did not have enough request volume to calculate
+ // success rate or the cluster did not have enough hosts to run through success rate outlier
+ // ejection.
+ LocalOriginSuccessRate *v3.Percent `protobuf:"bytes,8,opt,name=local_origin_success_rate,json=localOriginSuccessRate,proto3" json:"local_origin_success_rate,omitempty"`
+ // locality of the host.
+ Locality *v32.Locality `protobuf:"bytes,9,opt,name=locality,proto3" json:"locality,omitempty"`
+}
+
+func (x *HostStatus) Reset() {
+ *x = HostStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HostStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HostStatus) ProtoMessage() {}
+
+func (x *HostStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HostStatus.ProtoReflect.Descriptor instead.
+func (*HostStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *HostStatus) GetAddress() *v32.Address {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *HostStatus) GetStats() []*SimpleMetric {
+ if x != nil {
+ return x.Stats
+ }
+ return nil
+}
+
+func (x *HostStatus) GetHealthStatus() *HostHealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return nil
+}
+
+func (x *HostStatus) GetSuccessRate() *v3.Percent {
+ if x != nil {
+ return x.SuccessRate
+ }
+ return nil
+}
+
+func (x *HostStatus) GetWeight() uint32 {
+ if x != nil {
+ return x.Weight
+ }
+ return 0
+}
+
+func (x *HostStatus) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *HostStatus) GetPriority() uint32 {
+ if x != nil {
+ return x.Priority
+ }
+ return 0
+}
+
+func (x *HostStatus) GetLocalOriginSuccessRate() *v3.Percent {
+ if x != nil {
+ return x.LocalOriginSuccessRate
+ }
+ return nil
+}
+
+func (x *HostStatus) GetLocality() *v32.Locality {
+ if x != nil {
+ return x.Locality
+ }
+ return nil
+}
+
+// Health status for a host.
+// [#next-free-field: 9]
+type HostHealthStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The host is currently failing active health checks.
+ FailedActiveHealthCheck bool `protobuf:"varint,1,opt,name=failed_active_health_check,json=failedActiveHealthCheck,proto3" json:"failed_active_health_check,omitempty"`
+ // The host is currently considered an outlier and has been ejected.
+ FailedOutlierCheck bool `protobuf:"varint,2,opt,name=failed_outlier_check,json=failedOutlierCheck,proto3" json:"failed_outlier_check,omitempty"`
+ // The host is currently being marked as degraded through active health checking.
+ FailedActiveDegradedCheck bool `protobuf:"varint,4,opt,name=failed_active_degraded_check,json=failedActiveDegradedCheck,proto3" json:"failed_active_degraded_check,omitempty"`
+ // The host has been removed from service discovery, but is being stabilized due to active
+ // health checking.
+ PendingDynamicRemoval bool `protobuf:"varint,5,opt,name=pending_dynamic_removal,json=pendingDynamicRemoval,proto3" json:"pending_dynamic_removal,omitempty"`
+ // The host has not yet been health checked.
+ PendingActiveHc bool `protobuf:"varint,6,opt,name=pending_active_hc,json=pendingActiveHc,proto3" json:"pending_active_hc,omitempty"`
+ // The host should be excluded from panic, spillover, etc. calculations because it was explicitly
+ // taken out of rotation via protocol signal and is not meant to be routed to.
+ ExcludedViaImmediateHcFail bool `protobuf:"varint,7,opt,name=excluded_via_immediate_hc_fail,json=excludedViaImmediateHcFail,proto3" json:"excluded_via_immediate_hc_fail,omitempty"`
+ // The host failed active HC due to timeout.
+ ActiveHcTimeout bool `protobuf:"varint,8,opt,name=active_hc_timeout,json=activeHcTimeout,proto3" json:"active_hc_timeout,omitempty"`
+ // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported
+ // here.
+ // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]
+ EdsHealthStatus v32.HealthStatus `protobuf:"varint,3,opt,name=eds_health_status,json=edsHealthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"eds_health_status,omitempty"`
+}
+
+func (x *HostHealthStatus) Reset() {
+ *x = HostHealthStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HostHealthStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HostHealthStatus) ProtoMessage() {}
+
+func (x *HostHealthStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HostHealthStatus.ProtoReflect.Descriptor instead.
+func (*HostHealthStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HostHealthStatus) GetFailedActiveHealthCheck() bool {
+ if x != nil {
+ return x.FailedActiveHealthCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetFailedOutlierCheck() bool {
+ if x != nil {
+ return x.FailedOutlierCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetFailedActiveDegradedCheck() bool {
+ if x != nil {
+ return x.FailedActiveDegradedCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetPendingDynamicRemoval() bool {
+ if x != nil {
+ return x.PendingDynamicRemoval
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetPendingActiveHc() bool {
+ if x != nil {
+ return x.PendingActiveHc
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetExcludedViaImmediateHcFail() bool {
+ if x != nil {
+ return x.ExcludedViaImmediateHcFail
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetActiveHcTimeout() bool {
+ if x != nil {
+ return x.ActiveHcTimeout
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetEdsHealthStatus() v32.HealthStatus {
+ if x != nil {
+ return x.EdsHealthStatus
+ }
+ return v32.HealthStatus(0)
+}
+
+var File_envoy_admin_v3_clusters_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62,
+ 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x08, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a,
+ 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64,
+ 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x41, 0x70, 0x69, 0x12, 0x5d,
+ 0x0a, 0x1f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52,
+ 0x1c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3f, 0x0a,
+ 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x75,
+ 0x0a, 0x2c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73,
+ 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x27, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74,
+ 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, 0x63, 0x75,
+ 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62,
+ 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04,
+ 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b,
+ 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77,
+ 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69,
+ 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50,
+ 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69,
+ 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a,
+ 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79,
+ 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64,
+ 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c,
+ 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75,
+ 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69,
+ 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65,
+ 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61,
+ 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69,
+ 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64,
+ 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49,
+ 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64,
+ 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10,
+ 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42,
+ 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_clusters_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_clusters_proto_rawDescData = file_envoy_admin_v3_clusters_proto_rawDesc
+)
+
+func file_envoy_admin_v3_clusters_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_clusters_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_clusters_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_clusters_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_clusters_proto_rawDescData
+}
+
+var file_envoy_admin_v3_clusters_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_envoy_admin_v3_clusters_proto_goTypes = []interface{}{
+ (*Clusters)(nil), // 0: envoy.admin.v3.Clusters
+ (*ClusterStatus)(nil), // 1: envoy.admin.v3.ClusterStatus
+ (*HostStatus)(nil), // 2: envoy.admin.v3.HostStatus
+ (*HostHealthStatus)(nil), // 3: envoy.admin.v3.HostHealthStatus
+ (*v3.Percent)(nil), // 4: envoy.type.v3.Percent
+ (*v31.CircuitBreakers)(nil), // 5: envoy.config.cluster.v3.CircuitBreakers
+ (*v32.Address)(nil), // 6: envoy.config.core.v3.Address
+ (*SimpleMetric)(nil), // 7: envoy.admin.v3.SimpleMetric
+ (*v32.Locality)(nil), // 8: envoy.config.core.v3.Locality
+ (v32.HealthStatus)(0), // 9: envoy.config.core.v3.HealthStatus
+}
+var file_envoy_admin_v3_clusters_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Clusters.cluster_statuses:type_name -> envoy.admin.v3.ClusterStatus
+ 4, // 1: envoy.admin.v3.ClusterStatus.success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent
+ 2, // 2: envoy.admin.v3.ClusterStatus.host_statuses:type_name -> envoy.admin.v3.HostStatus
+ 4, // 3: envoy.admin.v3.ClusterStatus.local_origin_success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent
+ 5, // 4: envoy.admin.v3.ClusterStatus.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers
+ 6, // 5: envoy.admin.v3.HostStatus.address:type_name -> envoy.config.core.v3.Address
+ 7, // 6: envoy.admin.v3.HostStatus.stats:type_name -> envoy.admin.v3.SimpleMetric
+ 3, // 7: envoy.admin.v3.HostStatus.health_status:type_name -> envoy.admin.v3.HostHealthStatus
+ 4, // 8: envoy.admin.v3.HostStatus.success_rate:type_name -> envoy.type.v3.Percent
+ 4, // 9: envoy.admin.v3.HostStatus.local_origin_success_rate:type_name -> envoy.type.v3.Percent
+ 8, // 10: envoy.admin.v3.HostStatus.locality:type_name -> envoy.config.core.v3.Locality
+ 9, // 11: envoy.admin.v3.HostHealthStatus.eds_health_status:type_name -> envoy.config.core.v3.HealthStatus
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_clusters_proto_init() }
+func file_envoy_admin_v3_clusters_proto_init() {
+ if File_envoy_admin_v3_clusters_proto != nil {
+ return
+ }
+ file_envoy_admin_v3_metrics_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_clusters_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Clusters); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HostStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HostHealthStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_clusters_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_clusters_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_clusters_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_clusters_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_clusters_proto = out.File
+ file_envoy_admin_v3_clusters_proto_rawDesc = nil
+ file_envoy_admin_v3_clusters_proto_goTypes = nil
+ file_envoy_admin_v3_clusters_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go
new file mode 100644
index 000000000..d7658a09f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go
@@ -0,0 +1,803 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.HealthStatus(0)
+)
+
+// Validate checks the field values on Clusters with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Clusters) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Clusters with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClustersMultiError, or nil
+// if none found.
+func (m *Clusters) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Clusters) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetClusterStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ClustersMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersMultiError is an error wrapping multiple validation errors returned
+// by Clusters.ValidateAll() if the designated constraints aren't met.
+type ClustersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersMultiError) AllErrors() []error { return m }
+
+// ClustersValidationError is the validation error returned by
+// Clusters.Validate if the designated constraints aren't met.
+type ClustersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersValidationError) ErrorName() string { return "ClustersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClustersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusters.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersValidationError{}
+
+// Validate checks the field values on ClusterStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ClusterStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClusterStatusMultiError, or
+// nil if none found.
+func (m *ClusterStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for AddedViaApi
+
+ if all {
+ switch v := interface{}(m.GetSuccessRateEjectionThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetHostStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetCircuitBreakers()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ObservabilityName
+
+ // no validation rules for EdsServiceName
+
+ if len(errors) > 0 {
+ return ClusterStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterStatusMultiError is an error wrapping multiple validation errors
+// returned by ClusterStatus.ValidateAll() if the designated constraints
+// aren't met.
+type ClusterStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterStatusMultiError) AllErrors() []error { return m }
+
+// ClusterStatusValidationError is the validation error returned by
+// ClusterStatus.Validate if the designated constraints aren't met.
+type ClusterStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterStatusValidationError) ErrorName() string { return "ClusterStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClusterStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterStatusValidationError{}
+
+// Validate checks the field values on HostStatus with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HostStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HostStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HostStatusMultiError, or
+// nil if none found.
+func (m *HostStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HostStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetStats() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetHealthStatus()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHealthStatus()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSuccessRate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSuccessRate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Weight
+
+ // no validation rules for Hostname
+
+ // no validation rules for Priority
+
+ if all {
+ switch v := interface{}(m.GetLocalOriginSuccessRate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalOriginSuccessRate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLocality()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return HostStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// HostStatusMultiError is an error wrapping multiple validation errors
+// returned by HostStatus.ValidateAll() if the designated constraints aren't met.
+type HostStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HostStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HostStatusMultiError) AllErrors() []error { return m }
+
+// HostStatusValidationError is the validation error returned by
+// HostStatus.Validate if the designated constraints aren't met.
+type HostStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HostStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HostStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HostStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HostStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HostStatusValidationError) ErrorName() string { return "HostStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HostStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHostStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HostStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HostStatusValidationError{}
+
+// Validate checks the field values on HostHealthStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *HostHealthStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HostHealthStatus with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HostHealthStatusMultiError, or nil if none found.
+func (m *HostHealthStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HostHealthStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for FailedActiveHealthCheck
+
+ // no validation rules for FailedOutlierCheck
+
+ // no validation rules for FailedActiveDegradedCheck
+
+ // no validation rules for PendingDynamicRemoval
+
+ // no validation rules for PendingActiveHc
+
+ // no validation rules for ExcludedViaImmediateHcFail
+
+ // no validation rules for ActiveHcTimeout
+
+ // no validation rules for EdsHealthStatus
+
+ if len(errors) > 0 {
+ return HostHealthStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// HostHealthStatusMultiError is an error wrapping multiple validation errors
+// returned by HostHealthStatus.ValidateAll() if the designated constraints
+// aren't met.
+type HostHealthStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HostHealthStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HostHealthStatusMultiError) AllErrors() []error { return m }
+
+// HostHealthStatusValidationError is the validation error returned by
+// HostHealthStatus.Validate if the designated constraints aren't met.
+type HostHealthStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HostHealthStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HostHealthStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HostHealthStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HostHealthStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HostHealthStatusValidationError) ErrorName() string { return "HostHealthStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HostHealthStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHostHealthStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HostHealthStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HostHealthStatusValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go
new file mode 100644
index 000000000..418581107
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go
@@ -0,0 +1,656 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Clusters) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Clusters) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Clusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.ClusterStatuses) > 0 {
+ for iNdEx := len(m.ClusterStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.ClusterStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.EdsServiceName) > 0 {
+ i -= len(m.EdsServiceName)
+ copy(dAtA[i:], m.EdsServiceName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EdsServiceName)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.ObservabilityName) > 0 {
+ i -= len(m.ObservabilityName)
+ copy(dAtA[i:], m.ObservabilityName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ObservabilityName)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.CircuitBreakers != nil {
+ if vtmsg, ok := interface{}(m.CircuitBreakers).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.CircuitBreakers)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.LocalOriginSuccessRateEjectionThreshold != nil {
+ if vtmsg, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalOriginSuccessRateEjectionThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.HostStatuses) > 0 {
+ for iNdEx := len(m.HostStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.HostStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.SuccessRateEjectionThreshold != nil {
+ if vtmsg, ok := interface{}(m.SuccessRateEjectionThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SuccessRateEjectionThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.AddedViaApi {
+ i--
+ if m.AddedViaApi {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HostStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HostStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Locality != nil {
+ if vtmsg, ok := interface{}(m.Locality).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Locality)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.LocalOriginSuccessRate != nil {
+ if vtmsg, ok := interface{}(m.LocalOriginSuccessRate).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalOriginSuccessRate)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Priority != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.Hostname) > 0 {
+ i -= len(m.Hostname)
+ copy(dAtA[i:], m.Hostname)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Weight != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Weight))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.SuccessRate != nil {
+ if vtmsg, ok := interface{}(m.SuccessRate).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SuccessRate)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.HealthStatus != nil {
+ size, err := m.HealthStatus.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Stats) > 0 {
+ for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Stats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Address != nil {
+ if vtmsg, ok := interface{}(m.Address).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Address)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HostHealthStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostHealthStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HostHealthStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ActiveHcTimeout {
+ i--
+ if m.ActiveHcTimeout {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.ExcludedViaImmediateHcFail {
+ i--
+ if m.ExcludedViaImmediateHcFail {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.PendingActiveHc {
+ i--
+ if m.PendingActiveHc {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.PendingDynamicRemoval {
+ i--
+ if m.PendingDynamicRemoval {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.FailedActiveDegradedCheck {
+ i--
+ if m.FailedActiveDegradedCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.EdsHealthStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EdsHealthStatus))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.FailedOutlierCheck {
+ i--
+ if m.FailedOutlierCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.FailedActiveHealthCheck {
+ i--
+ if m.FailedActiveHealthCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Clusters) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ClusterStatuses) > 0 {
+ for _, e := range m.ClusterStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AddedViaApi {
+ n += 2
+ }
+ if m.SuccessRateEjectionThreshold != nil {
+ if size, ok := interface{}(m.SuccessRateEjectionThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.SuccessRateEjectionThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.HostStatuses) > 0 {
+ for _, e := range m.HostStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LocalOriginSuccessRateEjectionThreshold != nil {
+ if size, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalOriginSuccessRateEjectionThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CircuitBreakers != nil {
+ if size, ok := interface{}(m.CircuitBreakers).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.CircuitBreakers)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ObservabilityName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.EdsServiceName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HostStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Address != nil {
+ if size, ok := interface{}(m.Address).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Address)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.Stats) > 0 {
+ for _, e := range m.Stats {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.HealthStatus != nil {
+ l = m.HealthStatus.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.SuccessRate != nil {
+ if size, ok := interface{}(m.SuccessRate).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.SuccessRate)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Weight != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Weight))
+ }
+ l = len(m.Hostname)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Priority != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority))
+ }
+ if m.LocalOriginSuccessRate != nil {
+ if size, ok := interface{}(m.LocalOriginSuccessRate).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalOriginSuccessRate)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Locality != nil {
+ if size, ok := interface{}(m.Locality).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Locality)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HostHealthStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.FailedActiveHealthCheck {
+ n += 2
+ }
+ if m.FailedOutlierCheck {
+ n += 2
+ }
+ if m.EdsHealthStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.EdsHealthStatus))
+ }
+ if m.FailedActiveDegradedCheck {
+ n += 2
+ }
+ if m.PendingDynamicRemoval {
+ n += 2
+ }
+ if m.PendingActiveHc {
+ n += 2
+ }
+ if m.ExcludedViaImmediateHcFail {
+ n += 2
+ }
+ if m.ActiveHcTimeout {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go
new file mode 100644
index 000000000..c742c74db
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go
@@ -0,0 +1,642 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The :ref:`/config_dump ` admin endpoint uses this wrapper
+// message to maintain and serve arbitrary configuration information from any component in Envoy.
+type ConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This list is serialized and dumped in its entirety at the
+ // :ref:`/config_dump ` endpoint.
+ //
+ // The following configurations are currently supported and will be dumped in the order given
+ // below:
+ //
+ // * “bootstrap“: :ref:`BootstrapConfigDump `
+ // * “clusters“: :ref:`ClustersConfigDump `
+ // * “ecds_filter_http“: :ref:`EcdsConfigDump `
+ // * “ecds_filter_quic_listener“: :ref:`EcdsConfigDump `
+ // * “ecds_filter_tcp_listener“: :ref:`EcdsConfigDump `
+ // * “endpoints“: :ref:`EndpointsConfigDump `
+ // * “listeners“: :ref:`ListenersConfigDump `
+ // * “scoped_routes“: :ref:`ScopedRoutesConfigDump `
+ // * “routes“: :ref:`RoutesConfigDump `
+ // * “secrets“: :ref:`SecretsConfigDump `
+ //
+ // EDS Configuration will only be dumped by using parameter “?include_eds“
+ //
+ // Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for
+ // either HTTP or listener filter will only be dumped if it is actually configured.
+ //
+ // You can filter output with the resource and mask query parameters.
+ // See :ref:`/config_dump?resource={} `,
+ // :ref:`/config_dump?mask={} `,
+ // or :ref:`/config_dump?resource={},mask={}
+ // ` for more information.
+ Configs []*anypb.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"`
+}
+
+func (x *ConfigDump) Reset() {
+ *x = ConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigDump) ProtoMessage() {}
+
+func (x *ConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigDump.ProtoReflect.Descriptor instead.
+func (*ConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ConfigDump) GetConfigs() []*anypb.Any {
+ if x != nil {
+ return x.Configs
+ }
+ return nil
+}
+
+// This message describes the bootstrap configuration that Envoy was started with. This includes
+// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate
+// the static portions of an Envoy configuration by reusing the output as the bootstrap
+// configuration for another Envoy.
+type BootstrapConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Bootstrap *v3.Bootstrap `protobuf:"bytes,1,opt,name=bootstrap,proto3" json:"bootstrap,omitempty"`
+ // The timestamp when the BootstrapConfig was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *BootstrapConfigDump) Reset() {
+ *x = BootstrapConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BootstrapConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BootstrapConfigDump) ProtoMessage() {}
+
+func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BootstrapConfigDump.ProtoReflect.Descriptor instead.
+func (*BootstrapConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap {
+ if x != nil {
+ return x.Bootstrap
+ }
+ return nil
+}
+
+func (x *BootstrapConfigDump) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.
+type SecretsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded secrets.
+ StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"`
+ // The dynamically loaded active secrets. These are secrets that are available to service
+ // clusters or listeners.
+ DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"`
+ // The dynamically loaded warming secrets. These are secrets that are currently undergoing
+ // warming in preparation to service clusters or listeners.
+ DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"`
+}
+
+func (x *SecretsConfigDump) Reset() {
+ *x = SecretsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump) ProtoMessage() {}
+
+func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret {
+ if x != nil {
+ return x.StaticSecrets
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret {
+ if x != nil {
+ return x.DynamicActiveSecrets
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret {
+ if x != nil {
+ return x.DynamicWarmingSecrets
+ }
+ return nil
+}
+
+// DynamicSecret contains secret information fetched via SDS.
+// [#next-free-field: 7]
+type SecretsConfigDump_DynamicSecret struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the secret.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // This is the per-resource version information.
+ VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The timestamp when the secret was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // The actual secret information.
+ // Security sensitive information is redacted (replaced with "[redacted]") for
+ // private keys and passwords in TLS certificates.
+ Secret *anypb.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The *error_state* field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *SecretsConfigDump_DynamicSecret) Reset() {
+ *x = SecretsConfigDump_DynamicSecret{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump_DynamicSecret) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {}
+
+func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetSecret() *anypb.Any {
+ if x != nil {
+ return x.Secret
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// StaticSecret specifies statically loaded secret in bootstrap.
+type SecretsConfigDump_StaticSecret struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the secret.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The timestamp when the secret was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // The actual secret information.
+ // Security sensitive information is redacted (replaced with "[redacted]") for
+ // private keys and passwords in TLS certificates.
+ Secret *anypb.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"`
+}
+
+func (x *SecretsConfigDump_StaticSecret) Reset() {
+ *x = SecretsConfigDump_StaticSecret{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump_StaticSecret) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump_StaticSecret) ProtoMessage() {}
+
+func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetSecret() *anypb.Any {
+ if x != nil {
+ return x.Secret
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
+ 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61,
+ 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65,
+ 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73,
+ 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44,
+ 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x42, 0x78,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc
+)
+
+func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_config_dump_proto_rawDescData
+}
+
+var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{
+ (*ConfigDump)(nil), // 0: envoy.admin.v3.ConfigDump
+ (*BootstrapConfigDump)(nil), // 1: envoy.admin.v3.BootstrapConfigDump
+ (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump
+ (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret
+ (*anypb.Any)(nil), // 5: google.protobuf.Any
+ (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap
+ (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
+ (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState
+ (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus
+}
+var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{
+ 5, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any
+ 6, // 1: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap
+ 7, // 2: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp
+ 4, // 3: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret
+ 3, // 4: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ 3, // 5: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ 7, // 6: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> google.protobuf.Timestamp
+ 5, // 7: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any
+ 8, // 8: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 9, // 9: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 7, // 10: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp
+ 5, // 11: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_config_dump_proto_init() }
+func file_envoy_admin_v3_config_dump_proto_init() {
+ if File_envoy_admin_v3_config_dump_proto != nil {
+ return
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_config_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BootstrapConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump_DynamicSecret); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump_StaticSecret); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_config_dump_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_config_dump_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_config_dump_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_config_dump_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_config_dump_proto = out.File
+ file_envoy_admin_v3_config_dump_proto_rawDesc = nil
+ file_envoy_admin_v3_config_dump_proto_goTypes = nil
+ file_envoy_admin_v3_config_dump_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go
new file mode 100644
index 000000000..6f494af0b
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go
@@ -0,0 +1,893 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ConfigDump with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ConfigDump with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ConfigDumpMultiError, or
+// nil if none found.
+func (m *ConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by ConfigDump.ValidateAll() if the designated constraints aren't met.
+type ConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ConfigDumpMultiError) AllErrors() []error { return m }
+
+// ConfigDumpValidationError is the validation error returned by
+// ConfigDump.Validate if the designated constraints aren't met.
+type ConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ConfigDumpValidationError) ErrorName() string { return "ConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ConfigDumpValidationError{}
+
+// Validate checks the field values on BootstrapConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *BootstrapConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BootstrapConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// BootstrapConfigDumpMultiError, or nil if none found.
+func (m *BootstrapConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BootstrapConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetBootstrap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBootstrap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return BootstrapConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// BootstrapConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by BootstrapConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type BootstrapConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BootstrapConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BootstrapConfigDumpMultiError) AllErrors() []error { return m }
+
+// BootstrapConfigDumpValidationError is the validation error returned by
+// BootstrapConfigDump.Validate if the designated constraints aren't met.
+type BootstrapConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BootstrapConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BootstrapConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BootstrapConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BootstrapConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BootstrapConfigDumpValidationError) ErrorName() string {
+ return "BootstrapConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e BootstrapConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrapConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BootstrapConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BootstrapConfigDumpValidationError{}
+
+// Validate checks the field values on SecretsConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SecretsConfigDumpMultiError, or nil if none found.
+func (m *SecretsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicActiveSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicWarmingSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return SecretsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by SecretsConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type SecretsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDumpMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDumpValidationError is the validation error returned by
+// SecretsConfigDump.Validate if the designated constraints aren't met.
+type SecretsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDumpValidationError) ErrorName() string {
+ return "SecretsConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDumpValidationError{}
+
+// Validate checks the field values on SecretsConfigDump_DynamicSecret with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump_DynamicSecret) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// SecretsConfigDump_DynamicSecretMultiError, or nil if none found.
+func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSecret()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return SecretsConfigDump_DynamicSecretMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple
+// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll()
+// if the designated constraints aren't met.
+type SecretsConfigDump_DynamicSecretMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDump_DynamicSecretMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDump_DynamicSecretValidationError is the validation error
+// returned by SecretsConfigDump_DynamicSecret.Validate if the designated
+// constraints aren't met.
+type SecretsConfigDump_DynamicSecretValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string {
+ return "SecretsConfigDump_DynamicSecretValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDump_DynamicSecretValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDump_DynamicSecretValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDump_DynamicSecretValidationError{}
+
+// Validate checks the field values on SecretsConfigDump_StaticSecret with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump_StaticSecret) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// SecretsConfigDump_StaticSecretMultiError, or nil if none found.
+func (m *SecretsConfigDump_StaticSecret) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump_StaticSecret) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSecret()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return SecretsConfigDump_StaticSecretMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple
+// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll()
+// if the designated constraints aren't met.
+type SecretsConfigDump_StaticSecretMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDump_StaticSecretMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDump_StaticSecretValidationError is the validation error
+// returned by SecretsConfigDump_StaticSecret.Validate if the designated
+// constraints aren't met.
+type SecretsConfigDump_StaticSecretValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string {
+ return "SecretsConfigDump_StaticSecretValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDump_StaticSecretValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDump_StaticSecretValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDump_StaticSecretValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go
new file mode 100644
index 000000000..f48e702a1
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go
@@ -0,0 +1,2254 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource status from the view of a xDS client, which tells the synchronization
+// status between the xDS client and the xDS server.
+type ClientResourceStatus int32
+
+const (
+ // Resource status is not available/unknown.
+ ClientResourceStatus_UNKNOWN ClientResourceStatus = 0
+ // Client requested this resource but hasn't received any update from management
+ // server. The client will not fail requests, but will queue them until update
+ // arrives or the client times out waiting for the resource.
+ ClientResourceStatus_REQUESTED ClientResourceStatus = 1
+ // This resource has been requested by the client but has either not been
+ // delivered by the server or was previously delivered by the server and then
+ // subsequently removed from resources provided by the server. For more
+ // information, please refer to the :ref:`"Knowing When a Requested Resource
+ // Does Not Exist" ` section.
+ ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2
+ // Client received this resource and replied with ACK.
+ ClientResourceStatus_ACKED ClientResourceStatus = 3
+ // Client received this resource and replied with NACK.
+ ClientResourceStatus_NACKED ClientResourceStatus = 4
+ // Client received an error from the control plane. The attached config
+ // dump is the most recent accepted one. If no config is accepted yet,
+ // the attached config dump will be empty.
+ ClientResourceStatus_RECEIVED_ERROR ClientResourceStatus = 5
+ // Client timed out waiting for the resource from the control plane.
+ ClientResourceStatus_TIMEOUT ClientResourceStatus = 6
+)
+
+// Enum value maps for ClientResourceStatus.
+var (
+ ClientResourceStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "REQUESTED",
+ 2: "DOES_NOT_EXIST",
+ 3: "ACKED",
+ 4: "NACKED",
+ 5: "RECEIVED_ERROR",
+ 6: "TIMEOUT",
+ }
+ ClientResourceStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "REQUESTED": 1,
+ "DOES_NOT_EXIST": 2,
+ "ACKED": 3,
+ "NACKED": 4,
+ "RECEIVED_ERROR": 5,
+ "TIMEOUT": 6,
+ }
+)
+
+func (x ClientResourceStatus) Enum() *ClientResourceStatus {
+ p := new(ClientResourceStatus)
+ *p = x
+ return p
+}
+
+func (x ClientResourceStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0].Descriptor()
+}
+
+func (ClientResourceStatus) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0]
+}
+
+func (x ClientResourceStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ClientResourceStatus.Descriptor instead.
+func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0}
+}
+
+type UpdateFailureState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // What the component configuration would have been if the update had succeeded.
+ // This field may not be populated by xDS clients due to storage overhead.
+ FailedConfiguration *anypb.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"`
+ // Time of the latest failed update attempt.
+ LastUpdateAttempt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"`
+ // Details about the last failed update attempt.
+ Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
+ // This is the version of the rejected resource.
+ // [#not-implemented-hide:]
+ VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+}
+
+func (x *UpdateFailureState) Reset() {
+ *x = UpdateFailureState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateFailureState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateFailureState) ProtoMessage() {}
+
+func (x *UpdateFailureState) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead.
+func (*UpdateFailureState) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UpdateFailureState) GetFailedConfiguration() *anypb.Any {
+ if x != nil {
+ return x.FailedConfiguration
+ }
+ return nil
+}
+
+func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdateAttempt
+ }
+ return nil
+}
+
+func (x *UpdateFailureState) GetDetails() string {
+ if x != nil {
+ return x.Details
+ }
+ return ""
+}
+
+func (x *UpdateFailureState) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+// Envoy's listener manager fills this message with all currently known listeners. Listener
+// configuration information can be used to recreate an Envoy configuration by populating all
+// listeners as static listeners or by returning them in a LDS response.
+type ListenersConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the :ref:`version_info ` in the
+ // last processed LDS discovery response. If there are only static bootstrap listeners, this field
+ // will be "".
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The statically loaded listener configs.
+ StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"`
+ // State for any warming, active, or draining listeners.
+ DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"`
+}
+
+func (x *ListenersConfigDump) Reset() {
+ *x = ListenersConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump) ProtoMessage() {}
+
+func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListenersConfigDump) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener {
+ if x != nil {
+ return x.StaticListeners
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener {
+ if x != nil {
+ return x.DynamicListeners
+ }
+ return nil
+}
+
+// Envoy's cluster manager fills this message with all currently known clusters. Cluster
+// configuration information can be used to recreate an Envoy configuration by populating all
+// clusters as static clusters or by returning them in a CDS response.
+type ClustersConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the :ref:`version_info ` in the
+ // last processed CDS discovery response. If there are only static bootstrap clusters, this field
+ // will be "".
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The statically loaded cluster configs.
+ StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"`
+ // The dynamically loaded active clusters. These are clusters that are available to service
+ // data plane traffic.
+ DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"`
+ // The dynamically loaded warming clusters. These are clusters that are currently undergoing
+ // warming in preparation to service data plane traffic. Note that if attempting to recreate an
+ // Envoy configuration from a configuration dump, the warming clusters should generally be
+ // discarded.
+ DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"`
+}
+
+func (x *ClustersConfigDump) Reset() {
+ *x = ClustersConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump) ProtoMessage() {}
+
+func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ClustersConfigDump) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster {
+ if x != nil {
+ return x.StaticClusters
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster {
+ if x != nil {
+ return x.DynamicActiveClusters
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster {
+ if x != nil {
+ return x.DynamicWarmingClusters
+ }
+ return nil
+}
+
+// Envoy's RDS implementation fills this message with all currently loaded routes, as described by
+// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration
+// or defined inline while configuring listeners are separated from those configured dynamically via RDS.
+// Route configuration information can be used to recreate an Envoy configuration by populating all routes
+// as static routes or by returning them in RDS responses.
+type RoutesConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded route configs.
+ StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"`
+ // The dynamically loaded route configs.
+ DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"`
+}
+
+func (x *RoutesConfigDump) Reset() {
+ *x = RoutesConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump) ProtoMessage() {}
+
+func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig {
+ if x != nil {
+ return x.StaticRouteConfigs
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig {
+ if x != nil {
+ return x.DynamicRouteConfigs
+ }
+ return nil
+}
+
+// Envoy's scoped RDS implementation fills this message with all currently loaded route
+// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both
+// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the
+// dynamically obtained scopes via the SRDS API.
+type ScopedRoutesConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded scoped route configs.
+ InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"`
+ // The dynamically loaded scoped route configs.
+ DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump) Reset() {
+ *x = ScopedRoutesConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs {
+ if x != nil {
+ return x.InlineScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs {
+ if x != nil {
+ return x.DynamicScopedRouteConfigs
+ }
+ return nil
+}
+
+// Envoy's admin fill this message with all currently known endpoints. Endpoint
+// configuration information can be used to recreate an Envoy configuration by populating all
+// endpoints as static endpoints or by returning them in an EDS response.
+type EndpointsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded endpoint configs.
+ StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"`
+ // The dynamically loaded endpoint configs.
+ DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"`
+}
+
+func (x *EndpointsConfigDump) Reset() {
+ *x = EndpointsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump) ProtoMessage() {}
+
+func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig {
+ if x != nil {
+ return x.StaticEndpointConfigs
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig {
+ if x != nil {
+ return x.DynamicEndpointConfigs
+ }
+ return nil
+}
+
+// Envoy's ECDS service fills this message with all currently extension
+// configuration. Extension configuration information can be used to recreate
+// an Envoy ECDS listener and HTTP filters as static filters or by returning
+// them in ECDS response.
+type EcdsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ECDS filter configs.
+ EcdsFilters []*EcdsConfigDump_EcdsFilterConfig `protobuf:"bytes,1,rep,name=ecds_filters,json=ecdsFilters,proto3" json:"ecds_filters,omitempty"`
+}
+
+func (x *EcdsConfigDump) Reset() {
+ *x = EcdsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EcdsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcdsConfigDump) ProtoMessage() {}
+
+func (x *EcdsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcdsConfigDump.ProtoReflect.Descriptor instead.
+func (*EcdsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *EcdsConfigDump) GetEcdsFilters() []*EcdsConfigDump_EcdsFilterConfig {
+ if x != nil {
+ return x.EcdsFilters
+ }
+ return nil
+}
+
+// Describes a statically loaded listener.
+type ListenersConfigDump_StaticListener struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The listener config.
+ Listener *anypb.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"`
+ // The timestamp when the Listener was last successfully updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ListenersConfigDump_StaticListener) Reset() {
+ *x = ListenersConfigDump_StaticListener{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_StaticListener) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_StaticListener) ProtoMessage() {}
+
+func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *ListenersConfigDump_StaticListener) GetListener() *anypb.Any {
+ if x != nil {
+ return x.Listener
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+type ListenersConfigDump_DynamicListenerState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+ // :ref:`version_info ` field at the time
+ // that the listener was loaded. In the future, discrete per-listener versions may be supported
+ // by the API.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The listener config.
+ Listener *anypb.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"`
+ // The timestamp when the Listener was last successfully updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) Reset() {
+ *x = ListenersConfigDump_DynamicListenerState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {}
+
+func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetListener() *anypb.Any {
+ if x != nil {
+ return x.Listener
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Describes a dynamically loaded listener via the LDS API.
+// [#next-free-field: 7]
+type ListenersConfigDump_DynamicListener struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name or unique id of this listener, pulled from the DynamicListenerState config.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The listener state for any active listener by this name.
+ // These are listeners that are available to service data plane traffic.
+ ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"`
+ // The listener state for any warming listener by this name.
+ // These are listeners that are currently undergoing warming in preparation to service data
+ // plane traffic. Note that if attempting to recreate an Envoy configuration from a
+ // configuration dump, the warming listeners should generally be discarded.
+ WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"`
+ // The listener state for any draining listener by this name.
+ // These are listeners that are currently undergoing draining in preparation to stop servicing
+ // data plane traffic. Note that if attempting to recreate an Envoy configuration from a
+ // configuration dump, the draining listeners should generally be discarded.
+ DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ListenersConfigDump_DynamicListener) Reset() {
+ *x = ListenersConfigDump_DynamicListener{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_DynamicListener) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_DynamicListener) ProtoMessage() {}
+
+func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.ActiveState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.WarmingState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.DrainingState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// Describes a statically loaded cluster.
+type ClustersConfigDump_StaticCluster struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The cluster config.
+ Cluster *anypb.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // The timestamp when the Cluster was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ClustersConfigDump_StaticCluster) Reset() {
+ *x = ClustersConfigDump_StaticCluster{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump_StaticCluster) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump_StaticCluster) ProtoMessage() {}
+
+func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *ClustersConfigDump_StaticCluster) GetCluster() *anypb.Any {
+ if x != nil {
+ return x.Cluster
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Describes a dynamically loaded cluster via the CDS API.
+// [#next-free-field: 6]
+type ClustersConfigDump_DynamicCluster struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+ // :ref:`version_info ` field at the time
+ // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by
+ // the API.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The cluster config.
+ Cluster *anypb.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // The timestamp when the Cluster was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ClustersConfigDump_DynamicCluster) Reset() {
+ *x = ClustersConfigDump_DynamicCluster{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump_DynamicCluster) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {}
+
+func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetCluster() *anypb.Any {
+ if x != nil {
+ return x.Cluster
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type RoutesConfigDump_StaticRouteConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The route config.
+ RouteConfig *anypb.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"`
+ // The timestamp when the Route was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) Reset() {
+ *x = RoutesConfigDump_StaticRouteConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {}
+
+func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *anypb.Any {
+ if x != nil {
+ return x.RouteConfig
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type RoutesConfigDump_DynamicRouteConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+ // :ref:`version_info ` field at the time that
+ // the route configuration was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The route config.
+ RouteConfig *anypb.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"`
+ // The timestamp when the Route was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) Reset() {
+ *x = RoutesConfigDump_DynamicRouteConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *anypb.Any {
+ if x != nil {
+ return x.RouteConfig
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the scoped route configurations.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The scoped route configurations.
+ ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"`
+ // The timestamp when the scoped route config set was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() {
+ *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any {
+ if x != nil {
+ return x.ScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the scoped route configurations.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // This is the per-resource version information. This version is currently taken from the
+ // :ref:`version_info ` field at the time that
+ // the scoped routes configuration was loaded.
+ VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The scoped route configurations.
+ ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"`
+ // The timestamp when the scoped route config set was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() {
+ *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 1}
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any {
+ if x != nil {
+ return x.ScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type EndpointsConfigDump_StaticEndpointConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The endpoint config.
+ EndpointConfig *anypb.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
+ // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() {
+ *x = EndpointsConfigDump_StaticEndpointConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *anypb.Any {
+ if x != nil {
+ return x.EndpointConfig
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type EndpointsConfigDump_DynamicEndpointConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the
+ // :ref:`version_info ` field at the time that
+ // the endpoint configuration was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The endpoint config.
+ EndpointConfig *anypb.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
+ // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() {
+ *x = EndpointsConfigDump_DynamicEndpointConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 1}
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *anypb.Any {
+ if x != nil {
+ return x.EndpointConfig
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// [#next-free-field: 6]
+type EcdsConfigDump_EcdsFilterConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently
+ // taken from the :ref:`version_info
+ // `
+ // field at the time that the ECDS filter was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The ECDS filter config.
+ EcdsFilter *anypb.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"`
+ // The timestamp when the ECDS filter was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this
+ // particular resource along with the reason and timestamp. For successfully
+ // updated or acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) Reset() {
+ *x = EcdsConfigDump_EcdsFilterConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcdsConfigDump_EcdsFilterConfig) ProtoMessage() {}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcdsConfigDump_EcdsFilterConfig.ProtoReflect.Descriptor instead.
+func (*EcdsConfigDump_EcdsFilterConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *anypb.Any {
+ if x != nil {
+ return x.EcdsFilter
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+var File_envoy_admin_v3_config_dump_shared_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_config_dump_shared_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47,
+ 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65,
+ 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a,
+ 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
+ 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22,
+ 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, 0x0a, 0x10, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x64, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d,
+ 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x0e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x30,
+ 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a,
+ 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x1a, 0xef,
+ 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c,
+ 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b,
+ 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x43, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x61, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44,
+ 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12,
+ 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74,
+ 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x64, 0x79,
+ 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x15,
+ 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63,
+ 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a,
+ 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44,
+ 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0c,
+ 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52,
+ 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x0a,
+ 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75,
+ 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x7e, 0x0a,
+ 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72,
+ 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e,
+ 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65,
+ 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x81, 0x01,
+ 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64,
+ 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75,
+ 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61,
+ 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45,
+ 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63,
+ 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12,
+ 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61,
+ 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x31,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65,
+ 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x22, 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64,
+ 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45,
+ 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x1a, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64,
+ 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46,
+ 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0x89, 0x04, 0x0a, 0x0e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x52, 0x0a, 0x0c, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x63, 0x64,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xf7, 0x02, 0x0a, 0x10, 0x45, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21,
+ 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x35, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2a, 0x7e,
+ 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+ 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44,
+ 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45,
+ 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10,
+ 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a,
+ 0x0e, 0x52, 0x45, 0x43, 0x45, 0x49, 0x56, 0x45, 0x44, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10,
+ 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x06, 0x42, 0x7e,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescData = file_envoy_admin_v3_config_dump_shared_proto_rawDesc
+)
+
+func file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_shared_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescData
+}
+
+var file_envoy_admin_v3_config_dump_shared_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_admin_v3_config_dump_shared_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{
+ (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus
+ (*UpdateFailureState)(nil), // 1: envoy.admin.v3.UpdateFailureState
+ (*ListenersConfigDump)(nil), // 2: envoy.admin.v3.ListenersConfigDump
+ (*ClustersConfigDump)(nil), // 3: envoy.admin.v3.ClustersConfigDump
+ (*RoutesConfigDump)(nil), // 4: envoy.admin.v3.RoutesConfigDump
+ (*ScopedRoutesConfigDump)(nil), // 5: envoy.admin.v3.ScopedRoutesConfigDump
+ (*EndpointsConfigDump)(nil), // 6: envoy.admin.v3.EndpointsConfigDump
+ (*EcdsConfigDump)(nil), // 7: envoy.admin.v3.EcdsConfigDump
+ (*ListenersConfigDump_StaticListener)(nil), // 8: envoy.admin.v3.ListenersConfigDump.StaticListener
+ (*ListenersConfigDump_DynamicListenerState)(nil), // 9: envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ (*ListenersConfigDump_DynamicListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.DynamicListener
+ (*ClustersConfigDump_StaticCluster)(nil), // 11: envoy.admin.v3.ClustersConfigDump.StaticCluster
+ (*ClustersConfigDump_DynamicCluster)(nil), // 12: envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ (*RoutesConfigDump_StaticRouteConfig)(nil), // 13: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig
+ (*RoutesConfigDump_DynamicRouteConfig)(nil), // 14: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig
+ (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 15: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs
+ (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 16: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs
+ (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig
+ (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig
+ (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig
+ (*anypb.Any)(nil), // 20: google.protobuf.Any
+ (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
+}
+var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{
+ 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any
+ 21, // 1: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp
+ 8, // 2: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener
+ 10, // 3: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener
+ 11, // 4: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster
+ 12, // 5: envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ 12, // 6: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ 13, // 7: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig
+ 14, // 8: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig
+ 15, // 9: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs
+ 16, // 10: envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs
+ 17, // 11: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig
+ 18, // 12: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig
+ 19, // 13: envoy.admin.v3.EcdsConfigDump.ecds_filters:type_name -> envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig
+ 20, // 14: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any
+ 21, // 15: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 16: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any
+ 21, // 17: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp
+ 9, // 18: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 9, // 19: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 9, // 20: envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 1, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 23: envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any
+ 21, // 24: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 25: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any
+ 21, // 26: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 27: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 28: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 29: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any
+ 21, // 30: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 31: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any
+ 21, // 32: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 33: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 34: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 35: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any
+ 21, // 36: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 37: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any
+ 21, // 38: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 39: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 40: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 41: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any
+ 21, // 42: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 43: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any
+ 21, // 44: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 45: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 46: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 47: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.ecds_filter:type_name -> google.protobuf.Any
+ 21, // 48: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 49: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 50: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 51, // [51:51] is the sub-list for method output_type
+ 51, // [51:51] is the sub-list for method input_type
+ 51, // [51:51] is the sub-list for extension type_name
+ 51, // [51:51] is the sub-list for extension extendee
+ 0, // [0:51] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_config_dump_shared_proto_init() }
+func file_envoy_admin_v3_config_dump_shared_proto_init() {
+ if File_envoy_admin_v3_config_dump_shared_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateFailureState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EcdsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_StaticListener); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_DynamicListenerState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_DynamicListener); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump_StaticCluster); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump_DynamicCluster); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump_StaticRouteConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EcdsConfigDump_EcdsFilterConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_config_dump_shared_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 19,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_config_dump_shared_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_config_dump_shared_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_config_dump_shared_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_config_dump_shared_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_config_dump_shared_proto = out.File
+ file_envoy_admin_v3_config_dump_shared_proto_rawDesc = nil
+ file_envoy_admin_v3_config_dump_shared_proto_goTypes = nil
+ file_envoy_admin_v3_config_dump_shared_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go
new file mode 100644
index 000000000..dd16990ad
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go
@@ -0,0 +1,3435 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UpdateFailureState with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UpdateFailureState) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UpdateFailureState with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UpdateFailureStateMultiError, or nil if none found.
+func (m *UpdateFailureState) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UpdateFailureState) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetFailedConfiguration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdateAttempt()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Details
+
+ // no validation rules for VersionInfo
+
+ if len(errors) > 0 {
+ return UpdateFailureStateMultiError(errors)
+ }
+
+ return nil
+}
+
+// UpdateFailureStateMultiError is an error wrapping multiple validation errors
+// returned by UpdateFailureState.ValidateAll() if the designated constraints
+// aren't met.
+type UpdateFailureStateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UpdateFailureStateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UpdateFailureStateMultiError) AllErrors() []error { return m }
+
+// UpdateFailureStateValidationError is the validation error returned by
+// UpdateFailureState.Validate if the designated constraints aren't met.
+type UpdateFailureStateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UpdateFailureStateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UpdateFailureStateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UpdateFailureStateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UpdateFailureStateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UpdateFailureStateValidationError) ErrorName() string {
+ return "UpdateFailureStateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UpdateFailureStateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUpdateFailureState.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UpdateFailureStateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UpdateFailureStateValidationError{}
+
+// Validate checks the field values on ListenersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ListenersConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListenersConfigDumpMultiError, or nil if none found.
+func (m *ListenersConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetStaticListeners() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicListeners() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by ListenersConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDumpMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDumpValidationError is the validation error returned by
+// ListenersConfigDump.Validate if the designated constraints aren't met.
+type ListenersConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDumpValidationError) ErrorName() string {
+ return "ListenersConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDumpValidationError{}
+
+// Validate checks the field values on ClustersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ClustersConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ClustersConfigDumpMultiError, or nil if none found.
+func (m *ClustersConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClustersConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetStaticClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicActiveClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicWarmingClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ClustersConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by ClustersConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type ClustersConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDumpMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDumpValidationError is the validation error returned by
+// ClustersConfigDump.Validate if the designated constraints aren't met.
+type ClustersConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDumpValidationError) ErrorName() string {
+ return "ClustersConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDumpValidationError{}
+
+// Validate checks the field values on RoutesConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *RoutesConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RoutesConfigDumpMultiError, or nil if none found.
+func (m *RoutesConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RoutesConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return RoutesConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by RoutesConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type RoutesConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDumpMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDumpValidationError is the validation error returned by
+// RoutesConfigDump.Validate if the designated constraints aren't met.
+type RoutesConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDumpValidationError{}
+
+// Validate checks the field values on ScopedRoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ScopedRoutesConfigDumpMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ScopedRoutesConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetInlineScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDumpValidationError is the validation error returned by
+// ScopedRoutesConfigDump.Validate if the designated constraints aren't met.
+type ScopedRoutesConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDumpValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDumpValidationError{}
+
+// Validate checks the field values on EndpointsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EndpointsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EndpointsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// EndpointsConfigDumpMultiError, or nil if none found.
+func (m *EndpointsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EndpointsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticEndpointConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicEndpointConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return EndpointsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by EndpointsConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDumpValidationError is the validation error returned by
+// EndpointsConfigDump.Validate if the designated constraints aren't met.
+type EndpointsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDumpValidationError) ErrorName() string {
+ return "EndpointsConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDumpValidationError{}
+
+// Validate checks the field values on EcdsConfigDump with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *EcdsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EcdsConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in EcdsConfigDumpMultiError,
+// or nil if none found.
+func (m *EcdsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EcdsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetEcdsFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return EcdsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// EcdsConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by EcdsConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type EcdsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EcdsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EcdsConfigDumpMultiError) AllErrors() []error { return m }
+
+// EcdsConfigDumpValidationError is the validation error returned by
+// EcdsConfigDump.Validate if the designated constraints aren't met.
+type EcdsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EcdsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EcdsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EcdsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EcdsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EcdsConfigDumpValidationError) ErrorName() string { return "EcdsConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e EcdsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEcdsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EcdsConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EcdsConfigDumpValidationError{}
+
+// Validate checks the field values on ListenersConfigDump_StaticListener with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ListenersConfigDump_StaticListener) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump_StaticListener
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ListenersConfigDump_StaticListenerMultiError, or nil if none found.
+func (m *ListenersConfigDump_StaticListener) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump_StaticListener) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetListener()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_StaticListenerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple
+// validation errors returned by
+// ListenersConfigDump_StaticListener.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_StaticListenerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_StaticListenerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_StaticListenerValidationError is the validation error
+// returned by ListenersConfigDump_StaticListener.Validate if the designated
+// constraints aren't met.
+type ListenersConfigDump_StaticListenerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string {
+ return "ListenersConfigDump_StaticListenerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_StaticListenerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_StaticListener.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_StaticListenerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_StaticListenerValidationError{}
+
+// Validate checks the field values on ListenersConfigDump_DynamicListenerState
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *ListenersConfigDump_DynamicListenerState) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ListenersConfigDump_DynamicListenerState with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found.
+func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetListener()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_DynamicListenerStateMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping
+// multiple validation errors returned by
+// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerStateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_DynamicListenerStateValidationError is the validation
+// error returned by ListenersConfigDump_DynamicListenerState.Validate if the
+// designated constraints aren't met.
+type ListenersConfigDump_DynamicListenerStateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string {
+ return "ListenersConfigDump_DynamicListenerStateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_DynamicListenerStateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_DynamicListenerStateValidationError{}
+
+// Validate checks the field values on ListenersConfigDump_DynamicListener with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ListenersConfigDump_DynamicListener) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump_DynamicListener
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ListenersConfigDump_DynamicListenerMultiError, or nil if none found.
+func (m *ListenersConfigDump_DynamicListener) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump_DynamicListener) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetActiveState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWarmingState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDrainingState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_DynamicListenerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple
+// validation errors returned by
+// ListenersConfigDump_DynamicListener.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_DynamicListenerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_DynamicListenerValidationError is the validation error
+// returned by ListenersConfigDump_DynamicListener.Validate if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string {
+ return "ListenersConfigDump_DynamicListenerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_DynamicListenerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_DynamicListener.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_DynamicListenerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_DynamicListenerValidationError{}
+
+// Validate checks the field values on ClustersConfigDump_StaticCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ClustersConfigDump_StaticCluster) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ClustersConfigDump_StaticClusterMultiError, or nil if none found.
+func (m *ClustersConfigDump_StaticCluster) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClustersConfigDump_StaticCluster) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetCluster()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ClustersConfigDump_StaticClusterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple
+// validation errors returned by
+// ClustersConfigDump_StaticCluster.ValidateAll() if the designated
+// constraints aren't met.
+type ClustersConfigDump_StaticClusterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDump_StaticClusterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDump_StaticClusterValidationError is the validation error
+// returned by ClustersConfigDump_StaticCluster.Validate if the designated
+// constraints aren't met.
+type ClustersConfigDump_StaticClusterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string {
+ return "ClustersConfigDump_StaticClusterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDump_StaticClusterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump_StaticCluster.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDump_StaticClusterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDump_StaticClusterValidationError{}
+
+// Validate checks the field values on ClustersConfigDump_DynamicCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ClustersConfigDump_DynamicCluster) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ClustersConfigDump_DynamicClusterMultiError, or nil if none found.
+func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetCluster()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ClustersConfigDump_DynamicClusterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple
+// validation errors returned by
+// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated
+// constraints aren't met.
+type ClustersConfigDump_DynamicClusterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDump_DynamicClusterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDump_DynamicClusterValidationError is the validation error
+// returned by ClustersConfigDump_DynamicCluster.Validate if the designated
+// constraints aren't met.
+type ClustersConfigDump_DynamicClusterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string {
+ return "ClustersConfigDump_DynamicClusterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDump_DynamicClusterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDump_DynamicClusterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDump_DynamicClusterValidationError{}
+
+// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *RoutesConfigDump_StaticRouteConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found.
+func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetRouteConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RoutesConfigDump_StaticRouteConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple
+// validation errors returned by
+// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated
+// constraints aren't met.
+type RoutesConfigDump_StaticRouteConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDump_StaticRouteConfigValidationError is the validation error
+// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated
+// constraints aren't met.
+type RoutesConfigDump_StaticRouteConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string {
+ return "RoutesConfigDump_StaticRouteConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDump_StaticRouteConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDump_StaticRouteConfigValidationError{}
+
+// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found.
+func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetRouteConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return RoutesConfigDump_DynamicRouteConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple
+// validation errors returned by
+// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated
+// constraints aren't met.
+type RoutesConfigDump_DynamicRouteConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error
+// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated
+// constraints aren't met.
+type RoutesConfigDump_DynamicRouteConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string {
+ return "RoutesConfigDump_DynamicRouteConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDump_DynamicRouteConfigValidationError{}
+
+// Validate checks the field values on
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ for idx, item := range m.GetScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error
+// wrapping multiple validation errors returned by
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the
+// designated constraints aren't met.
+type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the
+// validation error returned by
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{}
+
+// Validate checks the field values on
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error
+// wrapping multiple validation errors returned by
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if the
+// designated constraints aren't met.
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the
+// validation error returned by
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{}
+
+// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found.
+func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetEndpointConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return EndpointsConfigDump_StaticEndpointConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDump_StaticEndpointConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation
+// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the
+// designated constraints aren't met.
+type EndpointsConfigDump_StaticEndpointConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string {
+ return "EndpointsConfigDump_StaticEndpointConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDump_StaticEndpointConfigValidationError{}
+
+// Validate checks the field values on
+// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found.
+func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetEndpointConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDump_DynamicEndpointConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation
+// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the
+// designated constraints aren't met.
+type EndpointsConfigDump_DynamicEndpointConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string {
+ return "EndpointsConfigDump_DynamicEndpointConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDump_DynamicEndpointConfigValidationError{}
+
+// Validate checks the field values on EcdsConfigDump_EcdsFilterConfig with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EcdsConfigDump_EcdsFilterConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EcdsConfigDump_EcdsFilterConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// EcdsConfigDump_EcdsFilterConfigMultiError, or nil if none found.
+func (m *EcdsConfigDump_EcdsFilterConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetEcdsFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEcdsFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return EcdsConfigDump_EcdsFilterConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EcdsConfigDump_EcdsFilterConfigMultiError is an error wrapping multiple
+// validation errors returned by EcdsConfigDump_EcdsFilterConfig.ValidateAll()
+// if the designated constraints aren't met.
+type EcdsConfigDump_EcdsFilterConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EcdsConfigDump_EcdsFilterConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EcdsConfigDump_EcdsFilterConfigMultiError) AllErrors() []error { return m }
+
+// EcdsConfigDump_EcdsFilterConfigValidationError is the validation error
+// returned by EcdsConfigDump_EcdsFilterConfig.Validate if the designated
+// constraints aren't met.
+type EcdsConfigDump_EcdsFilterConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) ErrorName() string {
+ return "EcdsConfigDump_EcdsFilterConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEcdsConfigDump_EcdsFilterConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EcdsConfigDump_EcdsFilterConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EcdsConfigDump_EcdsFilterConfigValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go
new file mode 100644
index 000000000..934de8568
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go
@@ -0,0 +1,1715 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *UpdateFailureState) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateFailureState) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UpdateFailureState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Details) > 0 {
+ i -= len(m.Details)
+ copy(dAtA[i:], m.Details)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Details)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastUpdateAttempt != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdateAttempt).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.FailedConfiguration != nil {
+ size, err := (*anypb.Any)(m.FailedConfiguration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Listener != nil {
+ size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Listener != nil {
+ size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DrainingState != nil {
+ size, err := m.DrainingState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.WarmingState != nil {
+ size, err := m.WarmingState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ActiveState != nil {
+ size, err := m.ActiveState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicListeners) > 0 {
+ for iNdEx := len(m.DynamicListeners) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticListeners) > 0 {
+ for iNdEx := len(m.StaticListeners) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Cluster != nil {
+ size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Cluster != nil {
+ size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicWarmingClusters) > 0 {
+ for iNdEx := len(m.DynamicWarmingClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicWarmingClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.DynamicActiveClusters) > 0 {
+ for iNdEx := len(m.DynamicActiveClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicActiveClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticClusters) > 0 {
+ for iNdEx := len(m.StaticClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.RouteConfig != nil {
+ size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.RouteConfig != nil {
+ size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicRouteConfigs) > 0 {
+ for iNdEx := len(m.DynamicRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticRouteConfigs) > 0 {
+ for iNdEx := len(m.StaticRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.DynamicScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.InlineScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.InlineScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.InlineScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.EndpointConfig != nil {
+ size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.EndpointConfig != nil {
+ size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicEndpointConfigs) > 0 {
+ for iNdEx := len(m.DynamicEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticEndpointConfigs) > 0 {
+ for iNdEx := len(m.StaticEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.EcdsFilter != nil {
+ size, err := (*anypb.Any)(m.EcdsFilter).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EcdsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EcdsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EcdsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.EcdsFilters) > 0 {
+ for iNdEx := len(m.EcdsFilters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.EcdsFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UpdateFailureState) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.FailedConfiguration != nil {
+ l = (*anypb.Any)(m.FailedConfiguration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdateAttempt != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdateAttempt).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.Details)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_StaticListener) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Listener != nil {
+ l = (*anypb.Any)(m.Listener).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Listener != nil {
+ l = (*anypb.Any)(m.Listener).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_DynamicListener) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ActiveState != nil {
+ l = m.ActiveState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.WarmingState != nil {
+ l = m.WarmingState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainingState != nil {
+ l = m.DrainingState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StaticListeners) > 0 {
+ for _, e := range m.StaticListeners {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicListeners) > 0 {
+ for _, e := range m.DynamicListeners {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump_StaticCluster) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ l = (*anypb.Any)(m.Cluster).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump_DynamicCluster) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Cluster != nil {
+ l = (*anypb.Any)(m.Cluster).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StaticClusters) > 0 {
+ for _, e := range m.StaticClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicActiveClusters) > 0 {
+ for _, e := range m.DynamicActiveClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicWarmingClusters) > 0 {
+ for _, e := range m.DynamicWarmingClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RouteConfig != nil {
+ l = (*anypb.Any)(m.RouteConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RouteConfig != nil {
+ l = (*anypb.Any)(m.RouteConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticRouteConfigs) > 0 {
+ for _, e := range m.StaticRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicRouteConfigs) > 0 {
+ for _, e := range m.DynamicRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for _, e := range m.ScopedRouteConfigs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for _, e := range m.ScopedRouteConfigs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.InlineScopedRouteConfigs) > 0 {
+ for _, e := range m.InlineScopedRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicScopedRouteConfigs) > 0 {
+ for _, e := range m.DynamicScopedRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.EndpointConfig != nil {
+ l = (*anypb.Any)(m.EndpointConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EndpointConfig != nil {
+ l = (*anypb.Any)(m.EndpointConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticEndpointConfigs) > 0 {
+ for _, e := range m.StaticEndpointConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicEndpointConfigs) > 0 {
+ for _, e := range m.DynamicEndpointConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EcdsFilter != nil {
+ l = (*anypb.Any)(m.EcdsFilter).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EcdsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.EcdsFilters) > 0 {
+ for _, e := range m.EcdsFilters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go
new file mode 100644
index 000000000..78e37eec9
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go
@@ -0,0 +1,466 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Configs) > 0 {
+ for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.Configs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BootstrapConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BootstrapConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *BootstrapConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Bootstrap != nil {
+ if vtmsg, ok := interface{}(m.Bootstrap).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Bootstrap)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Secret != nil {
+ size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Secret != nil {
+ size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicWarmingSecrets) > 0 {
+ for iNdEx := len(m.DynamicWarmingSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicWarmingSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.DynamicActiveSecrets) > 0 {
+ for iNdEx := len(m.DynamicActiveSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicActiveSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.StaticSecrets) > 0 {
+ for iNdEx := len(m.StaticSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Configs) > 0 {
+ for _, e := range m.Configs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *BootstrapConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Bootstrap != nil {
+ if size, ok := interface{}(m.Bootstrap).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Bootstrap)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump_DynamicSecret) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Secret != nil {
+ l = (*anypb.Any)(m.Secret).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump_StaticSecret) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Secret != nil {
+ l = (*anypb.Any)(m.Secret).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticSecrets) > 0 {
+ for _, e := range m.StaticSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicActiveSecrets) > 0 {
+ for _, e := range m.DynamicActiveSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicWarmingSecrets) > 0 {
+ for _, e := range m.DynamicWarmingSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go
new file mode 100644
index 000000000..632817fac
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go
@@ -0,0 +1,241 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,
+// which provides the information of their unready targets.
+// The :ref:`/init_dump ` will dump all unready targets information.
+type UnreadyTargetsDumps struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // You can choose specific component to dump unready targets with mask query parameter.
+ // See :ref:`/init_dump?mask={} ` for more information.
+ // The dumps of unready targets of all init managers.
+ UnreadyTargetsDumps []*UnreadyTargetsDumps_UnreadyTargetsDump `protobuf:"bytes,1,rep,name=unready_targets_dumps,json=unreadyTargetsDumps,proto3" json:"unready_targets_dumps,omitempty"`
+}
+
+func (x *UnreadyTargetsDumps) Reset() {
+ *x = UnreadyTargetsDumps{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnreadyTargetsDumps) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnreadyTargetsDumps) ProtoMessage() {}
+
+func (x *UnreadyTargetsDumps) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnreadyTargetsDumps.ProtoReflect.Descriptor instead.
+func (*UnreadyTargetsDumps) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UnreadyTargetsDumps) GetUnreadyTargetsDumps() []*UnreadyTargetsDumps_UnreadyTargetsDump {
+ if x != nil {
+ return x.UnreadyTargetsDumps
+ }
+ return nil
+}
+
+// Message of unready targets information of an init manager.
+type UnreadyTargetsDumps_UnreadyTargetsDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the init manager. Example: "init_manager_xxx".
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Names of unready targets of the init manager. Example: "target_xxx".
+ TargetNames []string `protobuf:"bytes,2,rep,name=target_names,json=targetNames,proto3" json:"target_names,omitempty"`
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) Reset() {
+ *x = UnreadyTargetsDumps_UnreadyTargetsDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnreadyTargetsDumps_UnreadyTargetsDump) ProtoMessage() {}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnreadyTargetsDumps_UnreadyTargetsDump.ProtoReflect.Descriptor instead.
+func (*UnreadyTargetsDumps_UnreadyTargetsDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetTargetNames() []string {
+ if x != nil {
+ return x.TargetNames
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_init_dump_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_init_dump_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xce, 0x01, 0x0a, 0x13, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x75, 0x6e, 0x72, 0x65, 0x61,
+ 0x64, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x2e, 0x55, 0x6e, 0x72, 0x65,
+ 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x52, 0x13,
+ 0x75, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75,
+ 0x6d, 0x70, 0x73, 0x1a, 0x4b, 0x0a, 0x12, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a,
+ 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x49, 0x6e, 0x69, 0x74, 0x44, 0x75,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_init_dump_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_init_dump_proto_rawDescData = file_envoy_admin_v3_init_dump_proto_rawDesc
+)
+
+func file_envoy_admin_v3_init_dump_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_init_dump_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_init_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_init_dump_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_init_dump_proto_rawDescData
+}
+
+var file_envoy_admin_v3_init_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_init_dump_proto_goTypes = []interface{}{
+ (*UnreadyTargetsDumps)(nil), // 0: envoy.admin.v3.UnreadyTargetsDumps
+ (*UnreadyTargetsDumps_UnreadyTargetsDump)(nil), // 1: envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump
+}
+var file_envoy_admin_v3_init_dump_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.UnreadyTargetsDumps.unready_targets_dumps:type_name -> envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_init_dump_proto_init() }
+func file_envoy_admin_v3_init_dump_proto_init() {
+ if File_envoy_admin_v3_init_dump_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_init_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnreadyTargetsDumps); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_init_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnreadyTargetsDumps_UnreadyTargetsDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_init_dump_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_init_dump_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_init_dump_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_init_dump_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_init_dump_proto = out.File
+ file_envoy_admin_v3_init_dump_proto_rawDesc = nil
+ file_envoy_admin_v3_init_dump_proto_goTypes = nil
+ file_envoy_admin_v3_init_dump_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go
new file mode 100644
index 000000000..f746a1264
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go
@@ -0,0 +1,281 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UnreadyTargetsDumps with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UnreadyTargetsDumps) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UnreadyTargetsDumps with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UnreadyTargetsDumpsMultiError, or nil if none found.
+func (m *UnreadyTargetsDumps) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UnreadyTargetsDumps) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetUnreadyTargetsDumps() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return UnreadyTargetsDumpsMultiError(errors)
+ }
+
+ return nil
+}
+
+// UnreadyTargetsDumpsMultiError is an error wrapping multiple validation
+// errors returned by UnreadyTargetsDumps.ValidateAll() if the designated
+// constraints aren't met.
+type UnreadyTargetsDumpsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UnreadyTargetsDumpsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UnreadyTargetsDumpsMultiError) AllErrors() []error { return m }
+
+// UnreadyTargetsDumpsValidationError is the validation error returned by
+// UnreadyTargetsDumps.Validate if the designated constraints aren't met.
+type UnreadyTargetsDumpsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UnreadyTargetsDumpsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UnreadyTargetsDumpsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UnreadyTargetsDumpsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UnreadyTargetsDumpsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UnreadyTargetsDumpsValidationError) ErrorName() string {
+ return "UnreadyTargetsDumpsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UnreadyTargetsDumpsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUnreadyTargetsDumps.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UnreadyTargetsDumpsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UnreadyTargetsDumpsValidationError{}
+
+// Validate checks the field values on UnreadyTargetsDumps_UnreadyTargetsDump
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// UnreadyTargetsDumps_UnreadyTargetsDump with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError, or nil if none found.
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if len(errors) > 0 {
+ return UnreadyTargetsDumps_UnreadyTargetsDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError is an error wrapping
+// multiple validation errors returned by
+// UnreadyTargetsDumps_UnreadyTargetsDump.ValidateAll() if the designated
+// constraints aren't met.
+type UnreadyTargetsDumps_UnreadyTargetsDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) AllErrors() []error { return m }
+
+// UnreadyTargetsDumps_UnreadyTargetsDumpValidationError is the validation
+// error returned by UnreadyTargetsDumps_UnreadyTargetsDump.Validate if the
+// designated constraints aren't met.
+type UnreadyTargetsDumps_UnreadyTargetsDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) ErrorName() string {
+ return "UnreadyTargetsDumps_UnreadyTargetsDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUnreadyTargetsDumps_UnreadyTargetsDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go
new file mode 100644
index 000000000..d957042b8
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go
@@ -0,0 +1,149 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.TargetNames) > 0 {
+ for iNdEx := len(m.TargetNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.TargetNames[iNdEx])
+ copy(dAtA[i:], m.TargetNames[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetNames[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnreadyTargetsDumps) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnreadyTargetsDumps) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UnreadyTargetsDumps) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.UnreadyTargetsDumps) > 0 {
+ for iNdEx := len(m.UnreadyTargetsDumps) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.UnreadyTargetsDumps[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.TargetNames) > 0 {
+ for _, s := range m.TargetNames {
+ l = len(s)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *UnreadyTargetsDumps) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.UnreadyTargetsDumps) > 0 {
+ for _, e := range m.UnreadyTargetsDumps {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go
new file mode 100644
index 000000000..71ab9ed88
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go
@@ -0,0 +1,268 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Admin endpoint uses this wrapper for “/listeners“ to display listener status information.
+// See :ref:`/listeners ` for more information.
+type Listeners struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of listener statuses.
+ ListenerStatuses []*ListenerStatus `protobuf:"bytes,1,rep,name=listener_statuses,json=listenerStatuses,proto3" json:"listener_statuses,omitempty"`
+}
+
+func (x *Listeners) Reset() {
+ *x = Listeners{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Listeners) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Listeners) ProtoMessage() {}
+
+func (x *Listeners) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Listeners.ProtoReflect.Descriptor instead.
+func (*Listeners) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Listeners) GetListenerStatuses() []*ListenerStatus {
+ if x != nil {
+ return x.ListenerStatuses
+ }
+ return nil
+}
+
+// Details an individual listener's current status.
+type ListenerStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the listener
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The actual local address that the listener is listening on. If a listener was configured
+ // to listen on port 0, then this address has the port that was allocated by the OS.
+ LocalAddress *v3.Address `protobuf:"bytes,2,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
+ // The additional addresses the listener is listening on as specified via the :ref:`additional_addresses `
+ // configuration.
+ AdditionalLocalAddresses []*v3.Address `protobuf:"bytes,3,rep,name=additional_local_addresses,json=additionalLocalAddresses,proto3" json:"additional_local_addresses,omitempty"`
+}
+
+func (x *ListenerStatus) Reset() {
+ *x = ListenerStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenerStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenerStatus) ProtoMessage() {}
+
+func (x *ListenerStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenerStatus.ProtoReflect.Descriptor instead.
+func (*ListenerStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListenerStatus) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListenerStatus) GetLocalAddress() *v3.Address {
+ if x != nil {
+ return x.LocalAddress
+ }
+ return nil
+}
+
+func (x *ListenerStatus) GetAdditionalLocalAddresses() []*v3.Address {
+ if x != nil {
+ return x.AdditionalLocalAddresses
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_listeners_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_listeners_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x11, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
+ 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a,
+ 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x5b, 0x0a, 0x1a, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x52, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x3a, 0x29,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x77, 0xba, 0x80, 0xc8, 0xd1, 0x06,
+ 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_listeners_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_listeners_proto_rawDescData = file_envoy_admin_v3_listeners_proto_rawDesc
+)
+
+func file_envoy_admin_v3_listeners_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_listeners_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_listeners_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_listeners_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_listeners_proto_rawDescData
+}
+
+var file_envoy_admin_v3_listeners_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_listeners_proto_goTypes = []interface{}{
+ (*Listeners)(nil), // 0: envoy.admin.v3.Listeners
+ (*ListenerStatus)(nil), // 1: envoy.admin.v3.ListenerStatus
+ (*v3.Address)(nil), // 2: envoy.config.core.v3.Address
+}
+var file_envoy_admin_v3_listeners_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Listeners.listener_statuses:type_name -> envoy.admin.v3.ListenerStatus
+ 2, // 1: envoy.admin.v3.ListenerStatus.local_address:type_name -> envoy.config.core.v3.Address
+ 2, // 2: envoy.admin.v3.ListenerStatus.additional_local_addresses:type_name -> envoy.config.core.v3.Address
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_listeners_proto_init() }
+func file_envoy_admin_v3_listeners_proto_init() {
+ if File_envoy_admin_v3_listeners_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_listeners_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Listeners); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_listeners_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenerStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_listeners_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_listeners_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_listeners_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_listeners_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_listeners_proto = out.File
+ file_envoy_admin_v3_listeners_proto_rawDesc = nil
+ file_envoy_admin_v3_listeners_proto_goTypes = nil
+ file_envoy_admin_v3_listeners_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go
new file mode 100644
index 000000000..02cce2639
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go
@@ -0,0 +1,335 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Listeners with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Listeners) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Listeners with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ListenersMultiError, or nil
+// if none found.
+func (m *Listeners) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Listeners) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetListenerStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenersMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersMultiError is an error wrapping multiple validation errors returned
+// by Listeners.ValidateAll() if the designated constraints aren't met.
+type ListenersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersMultiError) AllErrors() []error { return m }
+
+// ListenersValidationError is the validation error returned by
+// Listeners.Validate if the designated constraints aren't met.
+type ListenersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersValidationError) ErrorName() string { return "ListenersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ListenersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListeners.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersValidationError{}
+
+// Validate checks the field values on ListenerStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ListenerStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenerStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ListenerStatusMultiError,
+// or nil if none found.
+func (m *ListenerStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenerStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetLocalAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetAdditionalLocalAddresses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenerStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenerStatusMultiError is an error wrapping multiple validation errors
+// returned by ListenerStatus.ValidateAll() if the designated constraints
+// aren't met.
+type ListenerStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenerStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenerStatusMultiError) AllErrors() []error { return m }
+
+// ListenerStatusValidationError is the validation error returned by
+// ListenerStatus.Validate if the designated constraints aren't met.
+type ListenerStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenerStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenerStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenerStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenerStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenerStatusValidationError) ErrorName() string { return "ListenerStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ListenerStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenerStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenerStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenerStatusValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go
new file mode 100644
index 000000000..816437acf
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go
@@ -0,0 +1,203 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Listeners) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Listeners) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Listeners) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.ListenerStatuses) > 0 {
+ for iNdEx := len(m.ListenerStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.ListenerStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenerStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenerStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenerStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.AdditionalLocalAddresses) > 0 {
+ for iNdEx := len(m.AdditionalLocalAddresses) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.AdditionalLocalAddresses[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AdditionalLocalAddresses[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.LocalAddress != nil {
+ if vtmsg, ok := interface{}(m.LocalAddress).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalAddress)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Listeners) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ListenerStatuses) > 0 {
+ for _, e := range m.ListenerStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenerStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LocalAddress != nil {
+ if size, ok := interface{}(m.LocalAddress).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalAddress)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.AdditionalLocalAddresses) > 0 {
+ for _, e := range m.AdditionalLocalAddresses {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go
new file mode 100644
index 000000000..74f0a2d4e
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go
@@ -0,0 +1,228 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the internal memory consumption of an Envoy instance. These represent
+// values extracted from an internal TCMalloc instance. For more information, see the section of the
+// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html).
+// [#next-free-field: 7]
+type Memory struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of bytes allocated by the heap for Envoy. This is an alias for
+ // “generic.current_allocated_bytes“.
+ Allocated uint64 `protobuf:"varint,1,opt,name=allocated,proto3" json:"allocated,omitempty"`
+ // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for
+ // “generic.heap_size“.
+ HeapSize uint64 `protobuf:"varint,2,opt,name=heap_size,json=heapSize,proto3" json:"heap_size,omitempty"`
+ // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards
+ // virtual memory usage, and depending on the OS, typically do not count towards physical memory
+ // usage. This is an alias for “tcmalloc.pageheap_unmapped_bytes“.
+ PageheapUnmapped uint64 `protobuf:"varint,3,opt,name=pageheap_unmapped,json=pageheapUnmapped,proto3" json:"pageheap_unmapped,omitempty"`
+ // The number of bytes in free, mapped pages in the page heap. These bytes always count towards
+ // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also
+ // count towards physical memory usage. This is an alias for “tcmalloc.pageheap_free_bytes“.
+ PageheapFree uint64 `protobuf:"varint,4,opt,name=pageheap_free,json=pageheapFree,proto3" json:"pageheap_free,omitempty"`
+ // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias
+ // for “tcmalloc.current_total_thread_cache_bytes“.
+ TotalThreadCache uint64 `protobuf:"varint,5,opt,name=total_thread_cache,json=totalThreadCache,proto3" json:"total_thread_cache,omitempty"`
+ // The number of bytes of the physical memory usage by the allocator. This is an alias for
+ // “generic.total_physical_bytes“.
+ TotalPhysicalBytes uint64 `protobuf:"varint,6,opt,name=total_physical_bytes,json=totalPhysicalBytes,proto3" json:"total_physical_bytes,omitempty"`
+}
+
+func (x *Memory) Reset() {
+ *x = Memory{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_memory_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Memory) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Memory) ProtoMessage() {}
+
+func (x *Memory) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_memory_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Memory.ProtoReflect.Descriptor instead.
+func (*Memory) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_memory_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Memory) GetAllocated() uint64 {
+ if x != nil {
+ return x.Allocated
+ }
+ return 0
+}
+
+func (x *Memory) GetHeapSize() uint64 {
+ if x != nil {
+ return x.HeapSize
+ }
+ return 0
+}
+
+func (x *Memory) GetPageheapUnmapped() uint64 {
+ if x != nil {
+ return x.PageheapUnmapped
+ }
+ return 0
+}
+
+func (x *Memory) GetPageheapFree() uint64 {
+ if x != nil {
+ return x.PageheapFree
+ }
+ return 0
+}
+
+func (x *Memory) GetTotalThreadCache() uint64 {
+ if x != nil {
+ return x.TotalThreadCache
+ }
+ return 0
+}
+
+func (x *Memory) GetTotalPhysicalBytes() uint64 {
+ if x != nil {
+ return x.TotalPhysicalBytes
+ }
+ return 0
+}
+
+var File_envoy_admin_v3_memory_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_memory_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x98, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61,
+ 0x70, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61,
+ 0x70, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x10, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x70,
+ 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x5f, 0x66,
+ 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x68,
+ 0x65, 0x61, 0x70, 0x46, 0x72, 0x65, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70,
+ 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63,
+ 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x42, 0x74, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_memory_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_memory_proto_rawDescData = file_envoy_admin_v3_memory_proto_rawDesc
+)
+
+func file_envoy_admin_v3_memory_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_memory_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_memory_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_memory_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_memory_proto_rawDescData
+}
+
+var file_envoy_admin_v3_memory_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_memory_proto_goTypes = []interface{}{
+ (*Memory)(nil), // 0: envoy.admin.v3.Memory
+}
+var file_envoy_admin_v3_memory_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_memory_proto_init() }
+func file_envoy_admin_v3_memory_proto_init() {
+ if File_envoy_admin_v3_memory_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_memory_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Memory); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_memory_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_memory_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_memory_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_memory_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_memory_proto = out.File
+ file_envoy_admin_v3_memory_proto_rawDesc = nil
+ file_envoy_admin_v3_memory_proto_goTypes = nil
+ file_envoy_admin_v3_memory_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go
new file mode 100644
index 000000000..bcb9c1d20
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go
@@ -0,0 +1,147 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Memory with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Memory) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Memory with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in MemoryMultiError, or nil if none found.
+func (m *Memory) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Memory) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Allocated
+
+ // no validation rules for HeapSize
+
+ // no validation rules for PageheapUnmapped
+
+ // no validation rules for PageheapFree
+
+ // no validation rules for TotalThreadCache
+
+ // no validation rules for TotalPhysicalBytes
+
+ if len(errors) > 0 {
+ return MemoryMultiError(errors)
+ }
+
+ return nil
+}
+
+// MemoryMultiError is an error wrapping multiple validation errors returned by
+// Memory.ValidateAll() if the designated constraints aren't met.
+type MemoryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MemoryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MemoryMultiError) AllErrors() []error { return m }
+
+// MemoryValidationError is the validation error returned by Memory.Validate if
+// the designated constraints aren't met.
+type MemoryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MemoryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MemoryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MemoryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MemoryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MemoryValidationError) ErrorName() string { return "MemoryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MemoryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMemory.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MemoryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MemoryValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go
new file mode 100644
index 000000000..6e3a23688
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go
@@ -0,0 +1,110 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Memory) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Memory) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Memory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.TotalPhysicalBytes != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalPhysicalBytes))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.TotalThreadCache != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalThreadCache))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.PageheapFree != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapFree))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.PageheapUnmapped != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapUnmapped))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.HeapSize != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeapSize))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Allocated != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Allocated))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Memory) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Allocated != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Allocated))
+ }
+ if m.HeapSize != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.HeapSize))
+ }
+ if m.PageheapUnmapped != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapUnmapped))
+ }
+ if m.PageheapFree != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapFree))
+ }
+ if m.TotalThreadCache != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalThreadCache))
+ }
+ if m.TotalPhysicalBytes != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalPhysicalBytes))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go
new file mode 100644
index 000000000..21866a3e7
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go
@@ -0,0 +1,234 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SimpleMetric_Type int32
+
+const (
+ SimpleMetric_COUNTER SimpleMetric_Type = 0
+ SimpleMetric_GAUGE SimpleMetric_Type = 1
+)
+
+// Enum value maps for SimpleMetric_Type.
+var (
+ SimpleMetric_Type_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ }
+ SimpleMetric_Type_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ }
+)
+
+func (x SimpleMetric_Type) Enum() *SimpleMetric_Type {
+ p := new(SimpleMetric_Type)
+ *p = x
+ return p
+}
+
+func (x SimpleMetric_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SimpleMetric_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (SimpleMetric_Type) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_metrics_proto_enumTypes[0]
+}
+
+func (x SimpleMetric_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SimpleMetric_Type.Descriptor instead.
+func (SimpleMetric_Type) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Proto representation of an Envoy Counter or Gauge value.
+type SimpleMetric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of the metric represented.
+ Type SimpleMetric_Type `protobuf:"varint,1,opt,name=type,proto3,enum=envoy.admin.v3.SimpleMetric_Type" json:"type,omitempty"`
+ // Current metric value.
+ Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Name of the metric.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *SimpleMetric) Reset() {
+ *x = SimpleMetric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SimpleMetric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SimpleMetric) ProtoMessage() {}
+
+func (x *SimpleMetric) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SimpleMetric.ProtoReflect.Descriptor instead.
+func (*SimpleMetric) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SimpleMetric) GetType() SimpleMetric_Type {
+ if x != nil {
+ return x.Type
+ }
+ return SimpleMetric_COUNTER
+}
+
+func (x *SimpleMetric) GetValue() uint64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+func (x *SimpleMetric) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_envoy_admin_v3_metrics_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x22, 0x1e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f,
+ 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45,
+ 0x10, 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x42, 0x75, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_metrics_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_metrics_proto_rawDescData = file_envoy_admin_v3_metrics_proto_rawDesc
+)
+
+func file_envoy_admin_v3_metrics_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_metrics_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_metrics_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_metrics_proto_rawDescData
+}
+
+var file_envoy_admin_v3_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_admin_v3_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_metrics_proto_goTypes = []interface{}{
+ (SimpleMetric_Type)(0), // 0: envoy.admin.v3.SimpleMetric.Type
+ (*SimpleMetric)(nil), // 1: envoy.admin.v3.SimpleMetric
+}
+var file_envoy_admin_v3_metrics_proto_depIdxs = []int32{
+ 0, // 0: envoy.admin.v3.SimpleMetric.type:type_name -> envoy.admin.v3.SimpleMetric.Type
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_metrics_proto_init() }
+func file_envoy_admin_v3_metrics_proto_init() {
+ if File_envoy_admin_v3_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SimpleMetric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_metrics_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_metrics_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_metrics_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_metrics_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_metrics_proto = out.File
+ file_envoy_admin_v3_metrics_proto_rawDesc = nil
+ file_envoy_admin_v3_metrics_proto_goTypes = nil
+ file_envoy_admin_v3_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go
new file mode 100644
index 000000000..903d70e19
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go
@@ -0,0 +1,142 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on SimpleMetric with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *SimpleMetric) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SimpleMetric with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in SimpleMetricMultiError, or
+// nil if none found.
+func (m *SimpleMetric) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SimpleMetric) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Type
+
+ // no validation rules for Value
+
+ // no validation rules for Name
+
+ if len(errors) > 0 {
+ return SimpleMetricMultiError(errors)
+ }
+
+ return nil
+}
+
+// SimpleMetricMultiError is an error wrapping multiple validation errors
+// returned by SimpleMetric.ValidateAll() if the designated constraints aren't met.
+type SimpleMetricMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SimpleMetricMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SimpleMetricMultiError) AllErrors() []error { return m }
+
+// SimpleMetricValidationError is the validation error returned by
+// SimpleMetric.Validate if the designated constraints aren't met.
+type SimpleMetricValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SimpleMetricValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SimpleMetricValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SimpleMetricValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SimpleMetricValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SimpleMetricValidationError) ErrorName() string { return "SimpleMetricValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SimpleMetricValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSimpleMetric.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SimpleMetricValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SimpleMetricValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go
new file mode 100644
index 000000000..0c09ae045
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go
@@ -0,0 +1,89 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *SimpleMetric) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SimpleMetric) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SimpleMetric) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Value != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Type != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SimpleMetric) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Type))
+ }
+ if m.Value != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Value))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go
new file mode 100644
index 000000000..d78d94e57
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go
@@ -0,0 +1,191 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run
+// under :option:`--enable-mutex-tracing`. For more information, see the “absl::Mutex“
+// [docs](https://abseil.io/about/design/mutex#extra-features).
+//
+// *NB*: The wait cycles below are measured by “absl::base_internal::CycleClock“, and may not
+// correspond to core clock frequency. For more information, see the “CycleClock“
+// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
+type MutexStats struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of individual mutex contentions which have occurred since startup.
+ NumContentions uint64 `protobuf:"varint,1,opt,name=num_contentions,json=numContentions,proto3" json:"num_contentions,omitempty"`
+ // The length of the current contention wait cycle.
+ CurrentWaitCycles uint64 `protobuf:"varint,2,opt,name=current_wait_cycles,json=currentWaitCycles,proto3" json:"current_wait_cycles,omitempty"`
+ // The lifetime total of all contention wait cycles.
+ LifetimeWaitCycles uint64 `protobuf:"varint,3,opt,name=lifetime_wait_cycles,json=lifetimeWaitCycles,proto3" json:"lifetime_wait_cycles,omitempty"`
+}
+
+func (x *MutexStats) Reset() {
+ *x = MutexStats{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MutexStats) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MutexStats) ProtoMessage() {}
+
+func (x *MutexStats) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MutexStats.ProtoReflect.Descriptor instead.
+func (*MutexStats) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MutexStats) GetNumContentions() uint64 {
+ if x != nil {
+ return x.NumContentions
+ }
+ return 0
+}
+
+func (x *MutexStats) GetCurrentWaitCycles() uint64 {
+ if x != nil {
+ return x.CurrentWaitCycles
+ }
+ return 0
+}
+
+func (x *MutexStats) GetLifetimeWaitCycles() uint64 {
+ if x != nil {
+ return x.LifetimeWaitCycles
+ }
+ return 0
+}
+
+var File_envoy_admin_v3_mutex_stats_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_mutex_stats_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x53, 0x74,
+ 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6e, 0x75,
+ 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, 0x63,
+ 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14,
+ 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79,
+ 0x63, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x6c, 0x69, 0x66, 0x65,
+ 0x74, 0x69, 0x6d, 0x65, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x3a, 0x25,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x75, 0x74, 0x65, 0x78,
+ 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a,
+ 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4d,
+ 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_mutex_stats_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_mutex_stats_proto_rawDescData = file_envoy_admin_v3_mutex_stats_proto_rawDesc
+)
+
+func file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_mutex_stats_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_mutex_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_mutex_stats_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_mutex_stats_proto_rawDescData
+}
+
+var file_envoy_admin_v3_mutex_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_mutex_stats_proto_goTypes = []interface{}{
+ (*MutexStats)(nil), // 0: envoy.admin.v3.MutexStats
+}
+var file_envoy_admin_v3_mutex_stats_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_mutex_stats_proto_init() }
+func file_envoy_admin_v3_mutex_stats_proto_init() {
+ if File_envoy_admin_v3_mutex_stats_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_mutex_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MutexStats); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_mutex_stats_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_mutex_stats_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_mutex_stats_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_mutex_stats_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_mutex_stats_proto = out.File
+ file_envoy_admin_v3_mutex_stats_proto_rawDesc = nil
+ file_envoy_admin_v3_mutex_stats_proto_goTypes = nil
+ file_envoy_admin_v3_mutex_stats_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go
new file mode 100644
index 000000000..236524c54
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go
@@ -0,0 +1,142 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MutexStats with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *MutexStats) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MutexStats with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in MutexStatsMultiError, or
+// nil if none found.
+func (m *MutexStats) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MutexStats) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for NumContentions
+
+ // no validation rules for CurrentWaitCycles
+
+ // no validation rules for LifetimeWaitCycles
+
+ if len(errors) > 0 {
+ return MutexStatsMultiError(errors)
+ }
+
+ return nil
+}
+
+// MutexStatsMultiError is an error wrapping multiple validation errors
+// returned by MutexStats.ValidateAll() if the designated constraints aren't met.
+type MutexStatsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MutexStatsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MutexStatsMultiError) AllErrors() []error { return m }
+
+// MutexStatsValidationError is the validation error returned by
+// MutexStats.Validate if the designated constraints aren't met.
+type MutexStatsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MutexStatsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MutexStatsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MutexStatsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MutexStatsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MutexStatsValidationError) ErrorName() string { return "MutexStatsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MutexStatsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMutexStats.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MutexStatsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MutexStatsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go
new file mode 100644
index 000000000..4318cbc99
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go
@@ -0,0 +1,86 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *MutexStats) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MutexStats) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MutexStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LifetimeWaitCycles != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LifetimeWaitCycles))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.CurrentWaitCycles != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CurrentWaitCycles))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NumContentions != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumContentions))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MutexStats) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NumContentions != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.NumContentions))
+ }
+ if m.CurrentWaitCycles != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.CurrentWaitCycles))
+ }
+ if m.LifetimeWaitCycles != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.LifetimeWaitCycles))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go
new file mode 100644
index 000000000..fa32074d9
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go
@@ -0,0 +1,987 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ServerInfo_State int32
+
+const (
+ // Server is live and serving traffic.
+ ServerInfo_LIVE ServerInfo_State = 0
+ // Server is draining listeners in response to external health checks failing.
+ ServerInfo_DRAINING ServerInfo_State = 1
+ // Server has not yet completed cluster manager initialization.
+ ServerInfo_PRE_INITIALIZING ServerInfo_State = 2
+ // Server is running the cluster manager initialization callbacks (e.g., RDS).
+ ServerInfo_INITIALIZING ServerInfo_State = 3
+)
+
+// Enum value maps for ServerInfo_State.
+var (
+ ServerInfo_State_name = map[int32]string{
+ 0: "LIVE",
+ 1: "DRAINING",
+ 2: "PRE_INITIALIZING",
+ 3: "INITIALIZING",
+ }
+ ServerInfo_State_value = map[string]int32{
+ "LIVE": 0,
+ "DRAINING": 1,
+ "PRE_INITIALIZING": 2,
+ "INITIALIZING": 3,
+ }
+)
+
+func (x ServerInfo_State) Enum() *ServerInfo_State {
+ p := new(ServerInfo_State)
+ *p = x
+ return p
+}
+
+func (x ServerInfo_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServerInfo_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[0].Descriptor()
+}
+
+func (ServerInfo_State) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[0]
+}
+
+func (x ServerInfo_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServerInfo_State.Descriptor instead.
+func (ServerInfo_State) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type CommandLineOptions_IpVersion int32
+
+const (
+ CommandLineOptions_v4 CommandLineOptions_IpVersion = 0
+ CommandLineOptions_v6 CommandLineOptions_IpVersion = 1
+)
+
+// Enum value maps for CommandLineOptions_IpVersion.
+var (
+ CommandLineOptions_IpVersion_name = map[int32]string{
+ 0: "v4",
+ 1: "v6",
+ }
+ CommandLineOptions_IpVersion_value = map[string]int32{
+ "v4": 0,
+ "v6": 1,
+ }
+)
+
+func (x CommandLineOptions_IpVersion) Enum() *CommandLineOptions_IpVersion {
+ p := new(CommandLineOptions_IpVersion)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_IpVersion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_IpVersion) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[1].Descriptor()
+}
+
+func (CommandLineOptions_IpVersion) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[1]
+}
+
+func (x CommandLineOptions_IpVersion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_IpVersion.Descriptor instead.
+func (CommandLineOptions_IpVersion) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type CommandLineOptions_Mode int32
+
+const (
+ // Validate configs and then serve traffic normally.
+ CommandLineOptions_Serve CommandLineOptions_Mode = 0
+ // Validate configs and exit.
+ CommandLineOptions_Validate CommandLineOptions_Mode = 1
+ // Completely load and initialize the config, and then exit without running the listener loop.
+ CommandLineOptions_InitOnly CommandLineOptions_Mode = 2
+)
+
+// Enum value maps for CommandLineOptions_Mode.
+var (
+ CommandLineOptions_Mode_name = map[int32]string{
+ 0: "Serve",
+ 1: "Validate",
+ 2: "InitOnly",
+ }
+ CommandLineOptions_Mode_value = map[string]int32{
+ "Serve": 0,
+ "Validate": 1,
+ "InitOnly": 2,
+ }
+)
+
+func (x CommandLineOptions_Mode) Enum() *CommandLineOptions_Mode {
+ p := new(CommandLineOptions_Mode)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_Mode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_Mode) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[2].Descriptor()
+}
+
+func (CommandLineOptions_Mode) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[2]
+}
+
+func (x CommandLineOptions_Mode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_Mode.Descriptor instead.
+func (CommandLineOptions_Mode) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CommandLineOptions_DrainStrategy int32
+
+const (
+ // Gradually discourage connections over the course of the drain period.
+ CommandLineOptions_Gradual CommandLineOptions_DrainStrategy = 0
+ // Discourage all connections for the duration of the drain sequence.
+ CommandLineOptions_Immediate CommandLineOptions_DrainStrategy = 1
+)
+
+// Enum value maps for CommandLineOptions_DrainStrategy.
+var (
+ CommandLineOptions_DrainStrategy_name = map[int32]string{
+ 0: "Gradual",
+ 1: "Immediate",
+ }
+ CommandLineOptions_DrainStrategy_value = map[string]int32{
+ "Gradual": 0,
+ "Immediate": 1,
+ }
+)
+
+func (x CommandLineOptions_DrainStrategy) Enum() *CommandLineOptions_DrainStrategy {
+ p := new(CommandLineOptions_DrainStrategy)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_DrainStrategy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_DrainStrategy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[3].Descriptor()
+}
+
+func (CommandLineOptions_DrainStrategy) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[3]
+}
+
+func (x CommandLineOptions_DrainStrategy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_DrainStrategy.Descriptor instead.
+func (CommandLineOptions_DrainStrategy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 2}
+}
+
+// Proto representation of the value returned by /server_info, containing
+// server version/server status information.
+// [#next-free-field: 8]
+type ServerInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Server version.
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // State of the server.
+ State ServerInfo_State `protobuf:"varint,2,opt,name=state,proto3,enum=envoy.admin.v3.ServerInfo_State" json:"state,omitempty"`
+ // Uptime since current epoch was started.
+ UptimeCurrentEpoch *durationpb.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"`
+ // Uptime since the start of the first epoch.
+ UptimeAllEpochs *durationpb.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"`
+ // Hot restart version.
+ HotRestartVersion string `protobuf:"bytes,5,opt,name=hot_restart_version,json=hotRestartVersion,proto3" json:"hot_restart_version,omitempty"`
+ // Command line options the server is currently running with.
+ CommandLineOptions *CommandLineOptions `protobuf:"bytes,6,opt,name=command_line_options,json=commandLineOptions,proto3" json:"command_line_options,omitempty"`
+ // Populated node identity of this server.
+ Node *v3.Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"`
+}
+
+func (x *ServerInfo) Reset() {
+ *x = ServerInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerInfo) ProtoMessage() {}
+
+func (x *ServerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
+func (*ServerInfo) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerInfo) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *ServerInfo) GetState() ServerInfo_State {
+ if x != nil {
+ return x.State
+ }
+ return ServerInfo_LIVE
+}
+
+func (x *ServerInfo) GetUptimeCurrentEpoch() *durationpb.Duration {
+ if x != nil {
+ return x.UptimeCurrentEpoch
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetUptimeAllEpochs() *durationpb.Duration {
+ if x != nil {
+ return x.UptimeAllEpochs
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetHotRestartVersion() string {
+ if x != nil {
+ return x.HotRestartVersion
+ }
+ return ""
+}
+
+func (x *ServerInfo) GetCommandLineOptions() *CommandLineOptions {
+ if x != nil {
+ return x.CommandLineOptions
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+// [#next-free-field: 42]
+type CommandLineOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // See :option:`--base-id` for details.
+ BaseId uint64 `protobuf:"varint,1,opt,name=base_id,json=baseId,proto3" json:"base_id,omitempty"`
+ // See :option:`--use-dynamic-base-id` for details.
+ UseDynamicBaseId bool `protobuf:"varint,31,opt,name=use_dynamic_base_id,json=useDynamicBaseId,proto3" json:"use_dynamic_base_id,omitempty"`
+ // See :option:`--skip-hot-restart-on-no-parent` for details.
+ SkipHotRestartOnNoParent bool `protobuf:"varint,39,opt,name=skip_hot_restart_on_no_parent,json=skipHotRestartOnNoParent,proto3" json:"skip_hot_restart_on_no_parent,omitempty"`
+ // See :option:`--skip-hot-restart-parent-stats` for details.
+ SkipHotRestartParentStats bool `protobuf:"varint,40,opt,name=skip_hot_restart_parent_stats,json=skipHotRestartParentStats,proto3" json:"skip_hot_restart_parent_stats,omitempty"`
+ // See :option:`--base-id-path` for details.
+ BaseIdPath string `protobuf:"bytes,32,opt,name=base_id_path,json=baseIdPath,proto3" json:"base_id_path,omitempty"`
+ // See :option:`--concurrency` for details.
+ Concurrency uint32 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // See :option:`--config-path` for details.
+ ConfigPath string `protobuf:"bytes,3,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"`
+ // See :option:`--config-yaml` for details.
+ ConfigYaml string `protobuf:"bytes,4,opt,name=config_yaml,json=configYaml,proto3" json:"config_yaml,omitempty"`
+ // See :option:`--allow-unknown-static-fields` for details.
+ AllowUnknownStaticFields bool `protobuf:"varint,5,opt,name=allow_unknown_static_fields,json=allowUnknownStaticFields,proto3" json:"allow_unknown_static_fields,omitempty"`
+ // See :option:`--reject-unknown-dynamic-fields` for details.
+ RejectUnknownDynamicFields bool `protobuf:"varint,26,opt,name=reject_unknown_dynamic_fields,json=rejectUnknownDynamicFields,proto3" json:"reject_unknown_dynamic_fields,omitempty"`
+ // See :option:`--ignore-unknown-dynamic-fields` for details.
+ IgnoreUnknownDynamicFields bool `protobuf:"varint,30,opt,name=ignore_unknown_dynamic_fields,json=ignoreUnknownDynamicFields,proto3" json:"ignore_unknown_dynamic_fields,omitempty"`
+ // See :option:`--skip-deprecated-logs` for details.
+ SkipDeprecatedLogs bool `protobuf:"varint,41,opt,name=skip_deprecated_logs,json=skipDeprecatedLogs,proto3" json:"skip_deprecated_logs,omitempty"`
+ // See :option:`--admin-address-path` for details.
+ AdminAddressPath string `protobuf:"bytes,6,opt,name=admin_address_path,json=adminAddressPath,proto3" json:"admin_address_path,omitempty"`
+ // See :option:`--local-address-ip-version` for details.
+ LocalAddressIpVersion CommandLineOptions_IpVersion `protobuf:"varint,7,opt,name=local_address_ip_version,json=localAddressIpVersion,proto3,enum=envoy.admin.v3.CommandLineOptions_IpVersion" json:"local_address_ip_version,omitempty"`
+ // See :option:`--log-level` for details.
+ LogLevel string `protobuf:"bytes,8,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"`
+ // See :option:`--component-log-level` for details.
+ ComponentLogLevel string `protobuf:"bytes,9,opt,name=component_log_level,json=componentLogLevel,proto3" json:"component_log_level,omitempty"`
+ // See :option:`--log-format` for details.
+ LogFormat string `protobuf:"bytes,10,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"`
+ // See :option:`--log-format-escaped` for details.
+ LogFormatEscaped bool `protobuf:"varint,27,opt,name=log_format_escaped,json=logFormatEscaped,proto3" json:"log_format_escaped,omitempty"`
+ // See :option:`--log-path` for details.
+ LogPath string `protobuf:"bytes,11,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"`
+ // See :option:`--service-cluster` for details.
+ ServiceCluster string `protobuf:"bytes,13,opt,name=service_cluster,json=serviceCluster,proto3" json:"service_cluster,omitempty"`
+ // See :option:`--service-node` for details.
+ ServiceNode string `protobuf:"bytes,14,opt,name=service_node,json=serviceNode,proto3" json:"service_node,omitempty"`
+ // See :option:`--service-zone` for details.
+ ServiceZone string `protobuf:"bytes,15,opt,name=service_zone,json=serviceZone,proto3" json:"service_zone,omitempty"`
+ // See :option:`--file-flush-interval-msec` for details.
+ FileFlushInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"`
+ // See :option:`--drain-time-s` for details.
+ DrainTime *durationpb.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"`
+ // See :option:`--drain-strategy` for details.
+ DrainStrategy CommandLineOptions_DrainStrategy `protobuf:"varint,33,opt,name=drain_strategy,json=drainStrategy,proto3,enum=envoy.admin.v3.CommandLineOptions_DrainStrategy" json:"drain_strategy,omitempty"`
+ // See :option:`--parent-shutdown-time-s` for details.
+ ParentShutdownTime *durationpb.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"`
+ // See :option:`--mode` for details.
+ Mode CommandLineOptions_Mode `protobuf:"varint,19,opt,name=mode,proto3,enum=envoy.admin.v3.CommandLineOptions_Mode" json:"mode,omitempty"`
+ // See :option:`--disable-hot-restart` for details.
+ DisableHotRestart bool `protobuf:"varint,22,opt,name=disable_hot_restart,json=disableHotRestart,proto3" json:"disable_hot_restart,omitempty"`
+ // See :option:`--enable-mutex-tracing` for details.
+ EnableMutexTracing bool `protobuf:"varint,23,opt,name=enable_mutex_tracing,json=enableMutexTracing,proto3" json:"enable_mutex_tracing,omitempty"`
+ // See :option:`--restart-epoch` for details.
+ RestartEpoch uint32 `protobuf:"varint,24,opt,name=restart_epoch,json=restartEpoch,proto3" json:"restart_epoch,omitempty"`
+ // See :option:`--cpuset-threads` for details.
+ CpusetThreads bool `protobuf:"varint,25,opt,name=cpuset_threads,json=cpusetThreads,proto3" json:"cpuset_threads,omitempty"`
+ // See :option:`--disable-extensions` for details.
+ DisabledExtensions []string `protobuf:"bytes,28,rep,name=disabled_extensions,json=disabledExtensions,proto3" json:"disabled_extensions,omitempty"`
+ // See :option:`--enable-fine-grain-logging` for details.
+ EnableFineGrainLogging bool `protobuf:"varint,34,opt,name=enable_fine_grain_logging,json=enableFineGrainLogging,proto3" json:"enable_fine_grain_logging,omitempty"`
+ // See :option:`--socket-path` for details.
+ SocketPath string `protobuf:"bytes,35,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"`
+ // See :option:`--socket-mode` for details.
+ SocketMode uint32 `protobuf:"varint,36,opt,name=socket_mode,json=socketMode,proto3" json:"socket_mode,omitempty"`
+ // See :option:`--enable-core-dump` for details.
+ EnableCoreDump bool `protobuf:"varint,37,opt,name=enable_core_dump,json=enableCoreDump,proto3" json:"enable_core_dump,omitempty"`
+ // See :option:`--stats-tag` for details.
+ StatsTag []string `protobuf:"bytes,38,rep,name=stats_tag,json=statsTag,proto3" json:"stats_tag,omitempty"`
+}
+
+func (x *CommandLineOptions) Reset() {
+ *x = CommandLineOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CommandLineOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CommandLineOptions) ProtoMessage() {}
+
+func (x *CommandLineOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CommandLineOptions.ProtoReflect.Descriptor instead.
+func (*CommandLineOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CommandLineOptions) GetBaseId() uint64 {
+ if x != nil {
+ return x.BaseId
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetUseDynamicBaseId() bool {
+ if x != nil {
+ return x.UseDynamicBaseId
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipHotRestartOnNoParent() bool {
+ if x != nil {
+ return x.SkipHotRestartOnNoParent
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipHotRestartParentStats() bool {
+ if x != nil {
+ return x.SkipHotRestartParentStats
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetBaseIdPath() string {
+ if x != nil {
+ return x.BaseIdPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetConfigPath() string {
+ if x != nil {
+ return x.ConfigPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetConfigYaml() string {
+ if x != nil {
+ return x.ConfigYaml
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetAllowUnknownStaticFields() bool {
+ if x != nil {
+ return x.AllowUnknownStaticFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetRejectUnknownDynamicFields() bool {
+ if x != nil {
+ return x.RejectUnknownDynamicFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetIgnoreUnknownDynamicFields() bool {
+ if x != nil {
+ return x.IgnoreUnknownDynamicFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipDeprecatedLogs() bool {
+ if x != nil {
+ return x.SkipDeprecatedLogs
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetAdminAddressPath() string {
+ if x != nil {
+ return x.AdminAddressPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLocalAddressIpVersion() CommandLineOptions_IpVersion {
+ if x != nil {
+ return x.LocalAddressIpVersion
+ }
+ return CommandLineOptions_v4
+}
+
+func (x *CommandLineOptions) GetLogLevel() string {
+ if x != nil {
+ return x.LogLevel
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetComponentLogLevel() string {
+ if x != nil {
+ return x.ComponentLogLevel
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLogFormat() string {
+ if x != nil {
+ return x.LogFormat
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLogFormatEscaped() bool {
+ if x != nil {
+ return x.LogFormatEscaped
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetLogPath() string {
+ if x != nil {
+ return x.LogPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceCluster() string {
+ if x != nil {
+ return x.ServiceCluster
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceNode() string {
+ if x != nil {
+ return x.ServiceNode
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceZone() string {
+ if x != nil {
+ return x.ServiceZone
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetFileFlushInterval() *durationpb.Duration {
+ if x != nil {
+ return x.FileFlushInterval
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetDrainTime() *durationpb.Duration {
+ if x != nil {
+ return x.DrainTime
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetDrainStrategy() CommandLineOptions_DrainStrategy {
+ if x != nil {
+ return x.DrainStrategy
+ }
+ return CommandLineOptions_Gradual
+}
+
+func (x *CommandLineOptions) GetParentShutdownTime() *durationpb.Duration {
+ if x != nil {
+ return x.ParentShutdownTime
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetMode() CommandLineOptions_Mode {
+ if x != nil {
+ return x.Mode
+ }
+ return CommandLineOptions_Serve
+}
+
+func (x *CommandLineOptions) GetDisableHotRestart() bool {
+ if x != nil {
+ return x.DisableHotRestart
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetEnableMutexTracing() bool {
+ if x != nil {
+ return x.EnableMutexTracing
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetRestartEpoch() uint32 {
+ if x != nil {
+ return x.RestartEpoch
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetCpusetThreads() bool {
+ if x != nil {
+ return x.CpusetThreads
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetDisabledExtensions() []string {
+ if x != nil {
+ return x.DisabledExtensions
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetEnableFineGrainLogging() bool {
+ if x != nil {
+ return x.EnableFineGrainLogging
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSocketPath() string {
+ if x != nil {
+ return x.SocketPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetSocketMode() uint32 {
+ if x != nil {
+ return x.SocketMode
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetEnableCoreDump() bool {
+ if x != nil {
+ return x.EnableCoreDump
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetStatsTag() []string {
+ if x != nil {
+ return x.StatsTag
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_server_info_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_server_info_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, 0x04, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x70,
+ 0x6f, 0x63, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x61, 0x6c,
+ 0x6c, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x75, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x41, 0x6c, 0x6c, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x68, 0x6f,
+ 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x14, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e,
+ 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65,
+ 0x22, 0x47, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x56,
+ 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10,
+ 0x01, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x52, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c,
+ 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49,
+ 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20,
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
+ 0x22, 0x90, 0x10, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x62, 0x61, 0x73, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64,
+ 0x12, 0x2d, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f,
+ 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75,
+ 0x73, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12,
+ 0x3f, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, 0x52,
+ 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x6e, 0x4e, 0x6f, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x40, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73,
+ 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74,
+ 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a,
+ 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x30, 0x0a,
+ 0x14, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x29, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x6b, 0x69,
+ 0x70, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x73, 0x12,
+ 0x2c, 0x0a, 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a,
+ 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69,
+ 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c,
+ 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
+ 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65,
+ 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f,
+ 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19,
+ 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f,
+ 0x64, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65,
+ 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18,
+ 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a,
+ 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18,
+ 0x21, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69,
+ 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53,
+ 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74,
+ 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65,
+ 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f,
+ 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64,
+ 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78,
+ 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69,
+ 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70,
+ 0x6f, 0x63, 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65,
+ 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0d, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f,
+ 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x39, 0x0a, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 0x67,
+ 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72,
+ 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
+ 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0a, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70,
+ 0x18, 0x25, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f,
+ 0x72, 0x65, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f,
+ 0x74, 0x61, 0x67, 0x18, 0x26, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73,
+ 0x54, 0x61, 0x67, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x06, 0x0a, 0x02, 0x76, 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01,
+ 0x22, 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10,
+ 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22,
+ 0x2b, 0x0a, 0x0d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79,
+ 0x12, 0x0b, 0x0a, 0x07, 0x47, 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
+ 0x09, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64,
+ 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10,
+ 0x0d, 0x4a, 0x04, 0x08, 0x14, 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08,
+ 0x1d, 0x10, 0x1e, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10,
+ 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x52, 0x11, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69,
+ 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_server_info_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_server_info_proto_rawDescData = file_envoy_admin_v3_server_info_proto_rawDesc
+)
+
+func file_envoy_admin_v3_server_info_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_server_info_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_server_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_server_info_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_server_info_proto_rawDescData
+}
+
+var file_envoy_admin_v3_server_info_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_envoy_admin_v3_server_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_server_info_proto_goTypes = []interface{}{
+ (ServerInfo_State)(0), // 0: envoy.admin.v3.ServerInfo.State
+ (CommandLineOptions_IpVersion)(0), // 1: envoy.admin.v3.CommandLineOptions.IpVersion
+ (CommandLineOptions_Mode)(0), // 2: envoy.admin.v3.CommandLineOptions.Mode
+ (CommandLineOptions_DrainStrategy)(0), // 3: envoy.admin.v3.CommandLineOptions.DrainStrategy
+ (*ServerInfo)(nil), // 4: envoy.admin.v3.ServerInfo
+ (*CommandLineOptions)(nil), // 5: envoy.admin.v3.CommandLineOptions
+ (*durationpb.Duration)(nil), // 6: google.protobuf.Duration
+ (*v3.Node)(nil), // 7: envoy.config.core.v3.Node
+}
+var file_envoy_admin_v3_server_info_proto_depIdxs = []int32{
+ 0, // 0: envoy.admin.v3.ServerInfo.state:type_name -> envoy.admin.v3.ServerInfo.State
+ 6, // 1: envoy.admin.v3.ServerInfo.uptime_current_epoch:type_name -> google.protobuf.Duration
+ 6, // 2: envoy.admin.v3.ServerInfo.uptime_all_epochs:type_name -> google.protobuf.Duration
+ 5, // 3: envoy.admin.v3.ServerInfo.command_line_options:type_name -> envoy.admin.v3.CommandLineOptions
+ 7, // 4: envoy.admin.v3.ServerInfo.node:type_name -> envoy.config.core.v3.Node
+ 1, // 5: envoy.admin.v3.CommandLineOptions.local_address_ip_version:type_name -> envoy.admin.v3.CommandLineOptions.IpVersion
+ 6, // 6: envoy.admin.v3.CommandLineOptions.file_flush_interval:type_name -> google.protobuf.Duration
+ 6, // 7: envoy.admin.v3.CommandLineOptions.drain_time:type_name -> google.protobuf.Duration
+ 3, // 8: envoy.admin.v3.CommandLineOptions.drain_strategy:type_name -> envoy.admin.v3.CommandLineOptions.DrainStrategy
+ 6, // 9: envoy.admin.v3.CommandLineOptions.parent_shutdown_time:type_name -> google.protobuf.Duration
+ 2, // 10: envoy.admin.v3.CommandLineOptions.mode:type_name -> envoy.admin.v3.CommandLineOptions.Mode
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_server_info_proto_init() }
+func file_envoy_admin_v3_server_info_proto_init() {
+ if File_envoy_admin_v3_server_info_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_server_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_server_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CommandLineOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_server_info_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_server_info_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_server_info_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_server_info_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_server_info_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_server_info_proto = out.File
+ file_envoy_admin_v3_server_info_proto_rawDesc = nil
+ file_envoy_admin_v3_server_info_proto_goTypes = nil
+ file_envoy_admin_v3_server_info_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go
new file mode 100644
index 000000000..516156241
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go
@@ -0,0 +1,511 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ServerInfo with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ServerInfo) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerInfo with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ServerInfoMultiError, or
+// nil if none found.
+func (m *ServerInfo) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerInfo) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Version
+
+ // no validation rules for State
+
+ if all {
+ switch v := interface{}(m.GetUptimeCurrentEpoch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUptimeCurrentEpoch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUptimeAllEpochs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUptimeAllEpochs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for HotRestartVersion
+
+ if all {
+ switch v := interface{}(m.GetCommandLineOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCommandLineOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetNode()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ServerInfoMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerInfoMultiError is an error wrapping multiple validation errors
+// returned by ServerInfo.ValidateAll() if the designated constraints aren't met.
+type ServerInfoMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerInfoMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerInfoMultiError) AllErrors() []error { return m }
+
+// ServerInfoValidationError is the validation error returned by
+// ServerInfo.Validate if the designated constraints aren't met.
+type ServerInfoValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerInfoValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerInfoValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerInfoValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerInfoValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerInfoValidationError) ErrorName() string { return "ServerInfoValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ServerInfoValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerInfo.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerInfoValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerInfoValidationError{}
+
+// Validate checks the field values on CommandLineOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CommandLineOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CommandLineOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CommandLineOptionsMultiError, or nil if none found.
+func (m *CommandLineOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CommandLineOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BaseId
+
+ // no validation rules for UseDynamicBaseId
+
+ // no validation rules for SkipHotRestartOnNoParent
+
+ // no validation rules for SkipHotRestartParentStats
+
+ // no validation rules for BaseIdPath
+
+ // no validation rules for Concurrency
+
+ // no validation rules for ConfigPath
+
+ // no validation rules for ConfigYaml
+
+ // no validation rules for AllowUnknownStaticFields
+
+ // no validation rules for RejectUnknownDynamicFields
+
+ // no validation rules for IgnoreUnknownDynamicFields
+
+ // no validation rules for SkipDeprecatedLogs
+
+ // no validation rules for AdminAddressPath
+
+ // no validation rules for LocalAddressIpVersion
+
+ // no validation rules for LogLevel
+
+ // no validation rules for ComponentLogLevel
+
+ // no validation rules for LogFormat
+
+ // no validation rules for LogFormatEscaped
+
+ // no validation rules for LogPath
+
+ // no validation rules for ServiceCluster
+
+ // no validation rules for ServiceNode
+
+ // no validation rules for ServiceZone
+
+ if all {
+ switch v := interface{}(m.GetFileFlushInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFileFlushInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDrainTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDrainTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for DrainStrategy
+
+ if all {
+ switch v := interface{}(m.GetParentShutdownTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParentShutdownTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Mode
+
+ // no validation rules for DisableHotRestart
+
+ // no validation rules for EnableMutexTracing
+
+ // no validation rules for RestartEpoch
+
+ // no validation rules for CpusetThreads
+
+ // no validation rules for EnableFineGrainLogging
+
+ // no validation rules for SocketPath
+
+ // no validation rules for SocketMode
+
+ // no validation rules for EnableCoreDump
+
+ if len(errors) > 0 {
+ return CommandLineOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CommandLineOptionsMultiError is an error wrapping multiple validation errors
+// returned by CommandLineOptions.ValidateAll() if the designated constraints
+// aren't met.
+type CommandLineOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CommandLineOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CommandLineOptionsMultiError) AllErrors() []error { return m }
+
+// CommandLineOptionsValidationError is the validation error returned by
+// CommandLineOptions.Validate if the designated constraints aren't met.
+type CommandLineOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CommandLineOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CommandLineOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CommandLineOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CommandLineOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CommandLineOptionsValidationError) ErrorName() string {
+ return "CommandLineOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CommandLineOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCommandLineOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CommandLineOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CommandLineOptionsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go
new file mode 100644
index 000000000..ca7e4ede3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go
@@ -0,0 +1,686 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ServerInfo) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServerInfo) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ServerInfo) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Node != nil {
+ if vtmsg, ok := interface{}(m.Node).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Node)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.CommandLineOptions != nil {
+ size, err := m.CommandLineOptions.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.HotRestartVersion) > 0 {
+ i -= len(m.HotRestartVersion)
+ copy(dAtA[i:], m.HotRestartVersion)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HotRestartVersion)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.UptimeAllEpochs != nil {
+ size, err := (*durationpb.Duration)(m.UptimeAllEpochs).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.UptimeCurrentEpoch != nil {
+ size, err := (*durationpb.Duration)(m.UptimeCurrentEpoch).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.State != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CommandLineOptions) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CommandLineOptions) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CommandLineOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.SkipDeprecatedLogs {
+ i--
+ if m.SkipDeprecatedLogs {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc8
+ }
+ if m.SkipHotRestartParentStats {
+ i--
+ if m.SkipHotRestartParentStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc0
+ }
+ if m.SkipHotRestartOnNoParent {
+ i--
+ if m.SkipHotRestartOnNoParent {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb8
+ }
+ if len(m.StatsTag) > 0 {
+ for iNdEx := len(m.StatsTag) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.StatsTag[iNdEx])
+ copy(dAtA[i:], m.StatsTag[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatsTag[iNdEx])))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb2
+ }
+ }
+ if m.EnableCoreDump {
+ i--
+ if m.EnableCoreDump {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa8
+ }
+ if m.SocketMode != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SocketMode))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa0
+ }
+ if len(m.SocketPath) > 0 {
+ i -= len(m.SocketPath)
+ copy(dAtA[i:], m.SocketPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SocketPath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.EnableFineGrainLogging {
+ i--
+ if m.EnableFineGrainLogging {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x90
+ }
+ if m.DrainStrategy != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainStrategy))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x88
+ }
+ if len(m.BaseIdPath) > 0 {
+ i -= len(m.BaseIdPath)
+ copy(dAtA[i:], m.BaseIdPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BaseIdPath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.UseDynamicBaseId {
+ i--
+ if m.UseDynamicBaseId {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf8
+ }
+ if m.IgnoreUnknownDynamicFields {
+ i--
+ if m.IgnoreUnknownDynamicFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf0
+ }
+ if len(m.DisabledExtensions) > 0 {
+ for iNdEx := len(m.DisabledExtensions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.DisabledExtensions[iNdEx])
+ copy(dAtA[i:], m.DisabledExtensions[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisabledExtensions[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe2
+ }
+ }
+ if m.LogFormatEscaped {
+ i--
+ if m.LogFormatEscaped {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd8
+ }
+ if m.RejectUnknownDynamicFields {
+ i--
+ if m.RejectUnknownDynamicFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd0
+ }
+ if m.CpusetThreads {
+ i--
+ if m.CpusetThreads {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc8
+ }
+ if m.RestartEpoch != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RestartEpoch))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc0
+ }
+ if m.EnableMutexTracing {
+ i--
+ if m.EnableMutexTracing {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb8
+ }
+ if m.DisableHotRestart {
+ i--
+ if m.DisableHotRestart {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb0
+ }
+ if m.Mode != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x98
+ }
+ if m.ParentShutdownTime != nil {
+ size, err := (*durationpb.Duration)(m.ParentShutdownTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.DrainTime != nil {
+ size, err := (*durationpb.Duration)(m.DrainTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.FileFlushInterval != nil {
+ size, err := (*durationpb.Duration)(m.FileFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.ServiceZone) > 0 {
+ i -= len(m.ServiceZone)
+ copy(dAtA[i:], m.ServiceZone)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceZone)))
+ i--
+ dAtA[i] = 0x7a
+ }
+ if len(m.ServiceNode) > 0 {
+ i -= len(m.ServiceNode)
+ copy(dAtA[i:], m.ServiceNode)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceNode)))
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.ServiceCluster) > 0 {
+ i -= len(m.ServiceCluster)
+ copy(dAtA[i:], m.ServiceCluster)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceCluster)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.LogPath) > 0 {
+ i -= len(m.LogPath)
+ copy(dAtA[i:], m.LogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogPath)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.LogFormat) > 0 {
+ i -= len(m.LogFormat)
+ copy(dAtA[i:], m.LogFormat)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogFormat)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.ComponentLogLevel) > 0 {
+ i -= len(m.ComponentLogLevel)
+ copy(dAtA[i:], m.ComponentLogLevel)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ComponentLogLevel)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.LogLevel) > 0 {
+ i -= len(m.LogLevel)
+ copy(dAtA[i:], m.LogLevel)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogLevel)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.LocalAddressIpVersion != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LocalAddressIpVersion))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.AdminAddressPath) > 0 {
+ i -= len(m.AdminAddressPath)
+ copy(dAtA[i:], m.AdminAddressPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AdminAddressPath)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.AllowUnknownStaticFields {
+ i--
+ if m.AllowUnknownStaticFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if len(m.ConfigYaml) > 0 {
+ i -= len(m.ConfigYaml)
+ copy(dAtA[i:], m.ConfigYaml)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigYaml)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ConfigPath) > 0 {
+ i -= len(m.ConfigPath)
+ copy(dAtA[i:], m.ConfigPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigPath)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Concurrency != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Concurrency))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.BaseId != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BaseId))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServerInfo) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.State != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.State))
+ }
+ if m.UptimeCurrentEpoch != nil {
+ l = (*durationpb.Duration)(m.UptimeCurrentEpoch).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UptimeAllEpochs != nil {
+ l = (*durationpb.Duration)(m.UptimeAllEpochs).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.HotRestartVersion)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CommandLineOptions != nil {
+ l = m.CommandLineOptions.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Node != nil {
+ if size, ok := interface{}(m.Node).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Node)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CommandLineOptions) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BaseId != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.BaseId))
+ }
+ if m.Concurrency != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Concurrency))
+ }
+ l = len(m.ConfigPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ConfigYaml)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AllowUnknownStaticFields {
+ n += 2
+ }
+ l = len(m.AdminAddressPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LocalAddressIpVersion != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.LocalAddressIpVersion))
+ }
+ l = len(m.LogLevel)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ComponentLogLevel)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LogFormat)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceCluster)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceNode)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceZone)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.FileFlushInterval != nil {
+ l = (*durationpb.Duration)(m.FileFlushInterval).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainTime != nil {
+ l = (*durationpb.Duration)(m.DrainTime).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ParentShutdownTime != nil {
+ l = (*durationpb.Duration)(m.ParentShutdownTime).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Mode != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.Mode))
+ }
+ if m.DisableHotRestart {
+ n += 3
+ }
+ if m.EnableMutexTracing {
+ n += 3
+ }
+ if m.RestartEpoch != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.RestartEpoch))
+ }
+ if m.CpusetThreads {
+ n += 3
+ }
+ if m.RejectUnknownDynamicFields {
+ n += 3
+ }
+ if m.LogFormatEscaped {
+ n += 3
+ }
+ if len(m.DisabledExtensions) > 0 {
+ for _, s := range m.DisabledExtensions {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.IgnoreUnknownDynamicFields {
+ n += 3
+ }
+ if m.UseDynamicBaseId {
+ n += 3
+ }
+ l = len(m.BaseIdPath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainStrategy != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.DrainStrategy))
+ }
+ if m.EnableFineGrainLogging {
+ n += 3
+ }
+ l = len(m.SocketPath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.SocketMode != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.SocketMode))
+ }
+ if m.EnableCoreDump {
+ n += 3
+ }
+ if len(m.StatsTag) > 0 {
+ for _, s := range m.StatsTag {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.SkipHotRestartOnNoParent {
+ n += 3
+ }
+ if m.SkipHotRestartParentStats {
+ n += 3
+ }
+ if m.SkipDeprecatedLogs {
+ n += 3
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go
new file mode 100644
index 000000000..71c429162
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The /tap admin request body that is used to configure an active tap session.
+type TapRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The opaque configuration ID used to match the configuration to a loaded extension.
+ // A tap extension configures a similar opaque ID that is used to match.
+ ConfigId string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"`
+ // The tap configuration to load.
+ TapConfig *v3.TapConfig `protobuf:"bytes,2,opt,name=tap_config,json=tapConfig,proto3" json:"tap_config,omitempty"`
+}
+
+func (x *TapRequest) Reset() {
+ *x = TapRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_tap_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TapRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TapRequest) ProtoMessage() {}
+
+func (x *TapRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_tap_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TapRequest.ProtoReflect.Descriptor instead.
+func (*TapRequest) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_tap_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TapRequest) GetConfigId() string {
+ if x != nil {
+ return x.ConfigId
+ }
+ return ""
+}
+
+func (x *TapRequest) GetTapConfig() *v3.TapConfig {
+ if x != nil {
+ return x.TapConfig
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_tap_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_tap_proto_rawDesc = []byte{
+ 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x74, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x70, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x0a,
+ 0x74, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x70, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x54, 0x61, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x71, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_tap_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_tap_proto_rawDescData = file_envoy_admin_v3_tap_proto_rawDesc
+)
+
+func file_envoy_admin_v3_tap_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_tap_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_tap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_tap_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_tap_proto_rawDescData
+}
+
+var file_envoy_admin_v3_tap_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_tap_proto_goTypes = []interface{}{
+ (*TapRequest)(nil), // 0: envoy.admin.v3.TapRequest
+ (*v3.TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig
+}
+var file_envoy_admin_v3_tap_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.TapRequest.tap_config:type_name -> envoy.config.tap.v3.TapConfig
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_tap_proto_init() }
+func file_envoy_admin_v3_tap_proto_init() {
+ if File_envoy_admin_v3_tap_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_tap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TapRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_tap_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_tap_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_tap_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_tap_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_tap_proto = out.File
+ file_envoy_admin_v3_tap_proto_rawDesc = nil
+ file_envoy_admin_v3_tap_proto_goTypes = nil
+ file_envoy_admin_v3_tap_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go
new file mode 100644
index 000000000..d524f2aef
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go
@@ -0,0 +1,187 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TapRequest with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TapRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TapRequest with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TapRequestMultiError, or
+// nil if none found.
+func (m *TapRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TapRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetConfigId()) < 1 {
+ err := TapRequestValidationError{
+ field: "ConfigId",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTapConfig() == nil {
+ err := TapRequestValidationError{
+ field: "TapConfig",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTapConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTapConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TapRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// TapRequestMultiError is an error wrapping multiple validation errors
+// returned by TapRequest.ValidateAll() if the designated constraints aren't met.
+type TapRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TapRequestMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TapRequestMultiError) AllErrors() []error { return m }
+
+// TapRequestValidationError is the validation error returned by
+// TapRequest.Validate if the designated constraints aren't met.
+type TapRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TapRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TapRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TapRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TapRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TapRequestValidationError) ErrorName() string { return "TapRequestValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TapRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTapRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TapRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TapRequestValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go
new file mode 100644
index 000000000..4524bfb4f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go
@@ -0,0 +1,106 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *TapRequest) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TapRequest) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *TapRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.TapConfig != nil {
+ if vtmsg, ok := interface{}(m.TapConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.TapConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConfigId) > 0 {
+ i -= len(m.ConfigId)
+ copy(dAtA[i:], m.ConfigId)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TapRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConfigId)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TapConfig != nil {
+ if size, ok := interface{}(m.TapConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.TapConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
new file mode 100644
index 000000000..d748e467a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
@@ -0,0 +1,159 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 189503207,
+ Name: "envoy.annotations.disallowed_by_default",
+ Tag: "varint,189503207,opt,name=disallowed_by_default",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 157299826,
+ Name: "envoy.annotations.deprecated_at_minor_version",
+ Tag: "bytes,157299826,opt,name=deprecated_at_minor_version",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 70100853,
+ Name: "envoy.annotations.disallowed_by_default_enum",
+ Tag: "varint,70100853,opt,name=disallowed_by_default_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 181198657,
+ Name: "envoy.annotations.deprecated_at_minor_version_enum",
+ Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool disallowed_by_default = 189503207;
+ E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0]
+ // The API major and minor version on which the field was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version = 157299826;
+ E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional bool disallowed_by_default_enum = 70100853;
+ E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2]
+ // The API major and minor version on which the enum value was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version_enum = 181198657;
+ E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3]
+)
+
+var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_deprecation_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69,
+ 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2,
+ 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f,
+ 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x75, 0x6d, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions
+}
+var file_envoy_annotations_deprecation_proto_depIdxs = []int32{
+ 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions
+ 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions
+ 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions
+ 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_deprecation_proto_init() }
+func file_envoy_annotations_deprecation_proto_init() {
+ if File_envoy_annotations_deprecation_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_deprecation_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs,
+ ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_deprecation_proto = out.File
+ file_envoy_annotations_deprecation_proto_rawDesc = nil
+ file_envoy_annotations_deprecation_proto_goTypes = nil
+ file_envoy_annotations_deprecation_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
new file mode 100644
index 000000000..be58aa524
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
@@ -0,0 +1,37 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
new file mode 100644
index 000000000..7ec2d7c31
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource
+ // type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+}
+
+func (x *ResourceAnnotation) Reset() {
+ *x = ResourceAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceAnnotation) ProtoMessage() {}
+
+func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead.
+func (*ResourceAnnotation) Descriptor() ([]byte, []int) {
+ return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceAnnotation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*ResourceAnnotation)(nil),
+ Field: 265073217,
+ Name: "envoy.annotations.resource",
+ Tag: "bytes,265073217,opt,name=resource",
+ Filename: "envoy/annotations/resource.proto",
+ },
+}
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // optional envoy.annotations.ResourceAnnotation resource = 265073217;
+ E_Resource = &file_envoy_annotations_resource_proto_extTypes[0]
+)
+
+var File_envoy_annotations_resource_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_resource_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1,
+ 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_annotations_resource_proto_rawDescOnce sync.Once
+ file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc
+)
+
+func file_envoy_annotations_resource_proto_rawDescGZIP() []byte {
+ file_envoy_annotations_resource_proto_rawDescOnce.Do(func() {
+ file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData)
+ })
+ return file_envoy_annotations_resource_proto_rawDescData
+}
+
+var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_annotations_resource_proto_goTypes = []interface{}{
+ (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation
+ (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions
+}
+var file_envoy_annotations_resource_proto_depIdxs = []int32{
+ 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions
+ 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_resource_proto_init() }
+func file_envoy_annotations_resource_proto_init() {
+ if File_envoy_annotations_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_resource_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs,
+ MessageInfos: file_envoy_annotations_resource_proto_msgTypes,
+ ExtensionInfos: file_envoy_annotations_resource_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_resource_proto = out.File
+ file_envoy_annotations_resource_proto_rawDesc = nil
+ file_envoy_annotations_resource_proto_goTypes = nil
+ file_envoy_annotations_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
new file mode 100644
index 000000000..2929a5813
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
@@ -0,0 +1,141 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResourceAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceAnnotationMultiError, or nil if none found.
+func (m *ResourceAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Type
+
+ if len(errors) > 0 {
+ return ResourceAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceAnnotationMultiError is an error wrapping multiple validation errors
+// returned by ResourceAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceAnnotationMultiError) AllErrors() []error { return m }
+
+// ResourceAnnotationValidationError is the validation error returned by
+// ResourceAnnotation.Validate if the designated constraints aren't met.
+type ResourceAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceAnnotationValidationError) ErrorName() string {
+ return "ResourceAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceAnnotationValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go
new file mode 100644
index 000000000..324cb0916
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go
@@ -0,0 +1,73 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ResourceAnnotation) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceAnnotation) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResourceAnnotation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceAnnotation) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go
new file mode 100644
index 000000000..f434e6d40
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go
@@ -0,0 +1,1926 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ComparisonFilter_Op int32
+
+const (
+ // =
+ ComparisonFilter_EQ ComparisonFilter_Op = 0
+ // >=
+ ComparisonFilter_GE ComparisonFilter_Op = 1
+ // <=
+ ComparisonFilter_LE ComparisonFilter_Op = 2
+)
+
+// Enum value maps for ComparisonFilter_Op.
+var (
+ ComparisonFilter_Op_name = map[int32]string{
+ 0: "EQ",
+ 1: "GE",
+ 2: "LE",
+ }
+ ComparisonFilter_Op_value = map[string]int32{
+ "EQ": 0,
+ "GE": 1,
+ "LE": 2,
+ }
+)
+
+func (x ComparisonFilter_Op) Enum() *ComparisonFilter_Op {
+ p := new(ComparisonFilter_Op)
+ *p = x
+ return p
+}
+
+func (x ComparisonFilter_Op) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ComparisonFilter_Op) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor()
+}
+
+func (ComparisonFilter_Op) Type() protoreflect.EnumType {
+ return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0]
+}
+
+func (x ComparisonFilter_Op) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ComparisonFilter_Op.Descriptor instead.
+func (ComparisonFilter_Op) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2, 0}
+}
+
+type GrpcStatusFilter_Status int32
+
+const (
+ GrpcStatusFilter_OK GrpcStatusFilter_Status = 0
+ GrpcStatusFilter_CANCELED GrpcStatusFilter_Status = 1
+ GrpcStatusFilter_UNKNOWN GrpcStatusFilter_Status = 2
+ GrpcStatusFilter_INVALID_ARGUMENT GrpcStatusFilter_Status = 3
+ GrpcStatusFilter_DEADLINE_EXCEEDED GrpcStatusFilter_Status = 4
+ GrpcStatusFilter_NOT_FOUND GrpcStatusFilter_Status = 5
+ GrpcStatusFilter_ALREADY_EXISTS GrpcStatusFilter_Status = 6
+ GrpcStatusFilter_PERMISSION_DENIED GrpcStatusFilter_Status = 7
+ GrpcStatusFilter_RESOURCE_EXHAUSTED GrpcStatusFilter_Status = 8
+ GrpcStatusFilter_FAILED_PRECONDITION GrpcStatusFilter_Status = 9
+ GrpcStatusFilter_ABORTED GrpcStatusFilter_Status = 10
+ GrpcStatusFilter_OUT_OF_RANGE GrpcStatusFilter_Status = 11
+ GrpcStatusFilter_UNIMPLEMENTED GrpcStatusFilter_Status = 12
+ GrpcStatusFilter_INTERNAL GrpcStatusFilter_Status = 13
+ GrpcStatusFilter_UNAVAILABLE GrpcStatusFilter_Status = 14
+ GrpcStatusFilter_DATA_LOSS GrpcStatusFilter_Status = 15
+ GrpcStatusFilter_UNAUTHENTICATED GrpcStatusFilter_Status = 16
+)
+
+// Enum value maps for GrpcStatusFilter_Status.
+var (
+ GrpcStatusFilter_Status_name = map[int32]string{
+ 0: "OK",
+ 1: "CANCELED",
+ 2: "UNKNOWN",
+ 3: "INVALID_ARGUMENT",
+ 4: "DEADLINE_EXCEEDED",
+ 5: "NOT_FOUND",
+ 6: "ALREADY_EXISTS",
+ 7: "PERMISSION_DENIED",
+ 8: "RESOURCE_EXHAUSTED",
+ 9: "FAILED_PRECONDITION",
+ 10: "ABORTED",
+ 11: "OUT_OF_RANGE",
+ 12: "UNIMPLEMENTED",
+ 13: "INTERNAL",
+ 14: "UNAVAILABLE",
+ 15: "DATA_LOSS",
+ 16: "UNAUTHENTICATED",
+ }
+ GrpcStatusFilter_Status_value = map[string]int32{
+ "OK": 0,
+ "CANCELED": 1,
+ "UNKNOWN": 2,
+ "INVALID_ARGUMENT": 3,
+ "DEADLINE_EXCEEDED": 4,
+ "NOT_FOUND": 5,
+ "ALREADY_EXISTS": 6,
+ "PERMISSION_DENIED": 7,
+ "RESOURCE_EXHAUSTED": 8,
+ "FAILED_PRECONDITION": 9,
+ "ABORTED": 10,
+ "OUT_OF_RANGE": 11,
+ "UNIMPLEMENTED": 12,
+ "INTERNAL": 13,
+ "UNAVAILABLE": 14,
+ "DATA_LOSS": 15,
+ "UNAUTHENTICATED": 16,
+ }
+)
+
+func (x GrpcStatusFilter_Status) Enum() *GrpcStatusFilter_Status {
+ p := new(GrpcStatusFilter_Status)
+ *p = x
+ return p
+}
+
+func (x GrpcStatusFilter_Status) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcStatusFilter_Status) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcStatusFilter_Status) Type() protoreflect.EnumType {
+ return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1]
+}
+
+func (x GrpcStatusFilter_Status) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcStatusFilter_Status.Descriptor instead.
+func (GrpcStatusFilter_Status) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12, 0}
+}
+
+type AccessLog struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the access log extension configuration.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Filter which is used to determine if the access log needs to be written.
+ Filter *AccessLogFilter `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Custom configuration that must be set according to the access logger extension being instantiated.
+ // [#extension-category: envoy.access_loggers]
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *AccessLog_TypedConfig
+ ConfigType isAccessLog_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *AccessLog) Reset() {
+ *x = AccessLog{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccessLog) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccessLog) ProtoMessage() {}
+
+func (x *AccessLog) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccessLog.ProtoReflect.Descriptor instead.
+func (*AccessLog) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AccessLog) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AccessLog) GetFilter() *AccessLogFilter {
+ if x != nil {
+ return x.Filter
+ }
+ return nil
+}
+
+func (m *AccessLog) GetConfigType() isAccessLog_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *AccessLog) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isAccessLog_ConfigType interface {
+ isAccessLog_ConfigType()
+}
+
+type AccessLog_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {}
+
+// [#next-free-field: 14]
+type AccessLogFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to FilterSpecifier:
+ //
+ // *AccessLogFilter_StatusCodeFilter
+ // *AccessLogFilter_DurationFilter
+ // *AccessLogFilter_NotHealthCheckFilter
+ // *AccessLogFilter_TraceableFilter
+ // *AccessLogFilter_RuntimeFilter
+ // *AccessLogFilter_AndFilter
+ // *AccessLogFilter_OrFilter
+ // *AccessLogFilter_HeaderFilter
+ // *AccessLogFilter_ResponseFlagFilter
+ // *AccessLogFilter_GrpcStatusFilter
+ // *AccessLogFilter_ExtensionFilter
+ // *AccessLogFilter_MetadataFilter
+ // *AccessLogFilter_LogTypeFilter
+ FilterSpecifier isAccessLogFilter_FilterSpecifier `protobuf_oneof:"filter_specifier"`
+}
+
+func (x *AccessLogFilter) Reset() {
+ *x = AccessLogFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccessLogFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccessLogFilter) ProtoMessage() {}
+
+func (x *AccessLogFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccessLogFilter.ProtoReflect.Descriptor instead.
+func (*AccessLogFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *AccessLogFilter) GetFilterSpecifier() isAccessLogFilter_FilterSpecifier {
+ if m != nil {
+ return m.FilterSpecifier
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetStatusCodeFilter() *StatusCodeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_StatusCodeFilter); ok {
+ return x.StatusCodeFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetDurationFilter() *DurationFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_DurationFilter); ok {
+ return x.DurationFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetNotHealthCheckFilter() *NotHealthCheckFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_NotHealthCheckFilter); ok {
+ return x.NotHealthCheckFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetTraceableFilter() *TraceableFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_TraceableFilter); ok {
+ return x.TraceableFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetRuntimeFilter() *RuntimeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_RuntimeFilter); ok {
+ return x.RuntimeFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetAndFilter() *AndFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_AndFilter); ok {
+ return x.AndFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetOrFilter() *OrFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_OrFilter); ok {
+ return x.OrFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetHeaderFilter() *HeaderFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_HeaderFilter); ok {
+ return x.HeaderFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetResponseFlagFilter() *ResponseFlagFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ResponseFlagFilter); ok {
+ return x.ResponseFlagFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetGrpcStatusFilter() *GrpcStatusFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_GrpcStatusFilter); ok {
+ return x.GrpcStatusFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetExtensionFilter() *ExtensionFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ExtensionFilter); ok {
+ return x.ExtensionFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetMetadataFilter() *MetadataFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_MetadataFilter); ok {
+ return x.MetadataFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetLogTypeFilter() *LogTypeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_LogTypeFilter); ok {
+ return x.LogTypeFilter
+ }
+ return nil
+}
+
+type isAccessLogFilter_FilterSpecifier interface {
+ isAccessLogFilter_FilterSpecifier()
+}
+
+type AccessLogFilter_StatusCodeFilter struct {
+ // Status code filter.
+ StatusCodeFilter *StatusCodeFilter `protobuf:"bytes,1,opt,name=status_code_filter,json=statusCodeFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_DurationFilter struct {
+ // Duration filter.
+ DurationFilter *DurationFilter `protobuf:"bytes,2,opt,name=duration_filter,json=durationFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_NotHealthCheckFilter struct {
+ // Not health check filter.
+ NotHealthCheckFilter *NotHealthCheckFilter `protobuf:"bytes,3,opt,name=not_health_check_filter,json=notHealthCheckFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_TraceableFilter struct {
+ // Traceable filter.
+ TraceableFilter *TraceableFilter `protobuf:"bytes,4,opt,name=traceable_filter,json=traceableFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_RuntimeFilter struct {
+ // Runtime filter.
+ RuntimeFilter *RuntimeFilter `protobuf:"bytes,5,opt,name=runtime_filter,json=runtimeFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_AndFilter struct {
+ // And filter.
+ AndFilter *AndFilter `protobuf:"bytes,6,opt,name=and_filter,json=andFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_OrFilter struct {
+ // Or filter.
+ OrFilter *OrFilter `protobuf:"bytes,7,opt,name=or_filter,json=orFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_HeaderFilter struct {
+ // Header filter.
+ HeaderFilter *HeaderFilter `protobuf:"bytes,8,opt,name=header_filter,json=headerFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_ResponseFlagFilter struct {
+ // Response flag filter.
+ ResponseFlagFilter *ResponseFlagFilter `protobuf:"bytes,9,opt,name=response_flag_filter,json=responseFlagFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_GrpcStatusFilter struct {
+ // gRPC status filter.
+ GrpcStatusFilter *GrpcStatusFilter `protobuf:"bytes,10,opt,name=grpc_status_filter,json=grpcStatusFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_ExtensionFilter struct {
+ // Extension filter.
+ // [#extension-category: envoy.access_loggers.extension_filters]
+ ExtensionFilter *ExtensionFilter `protobuf:"bytes,11,opt,name=extension_filter,json=extensionFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_MetadataFilter struct {
+ // Metadata Filter
+ MetadataFilter *MetadataFilter `protobuf:"bytes,12,opt,name=metadata_filter,json=metadataFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_LogTypeFilter struct {
+ // Log Type Filter
+ LogTypeFilter *LogTypeFilter `protobuf:"bytes,13,opt,name=log_type_filter,json=logTypeFilter,proto3,oneof"`
+}
+
+func (*AccessLogFilter_StatusCodeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_DurationFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_NotHealthCheckFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_TraceableFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_RuntimeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_AndFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_OrFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_HeaderFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_ResponseFlagFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_GrpcStatusFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_ExtensionFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_MetadataFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_LogTypeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+// Filter on an integer comparison.
+type ComparisonFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison operator.
+ Op ComparisonFilter_Op `protobuf:"varint,1,opt,name=op,proto3,enum=envoy.config.accesslog.v3.ComparisonFilter_Op" json:"op,omitempty"`
+ // Value to compare against.
+ Value *v3.RuntimeUInt32 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *ComparisonFilter) Reset() {
+ *x = ComparisonFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ComparisonFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ComparisonFilter) ProtoMessage() {}
+
+func (x *ComparisonFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ComparisonFilter.ProtoReflect.Descriptor instead.
+func (*ComparisonFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ComparisonFilter) GetOp() ComparisonFilter_Op {
+ if x != nil {
+ return x.Op
+ }
+ return ComparisonFilter_EQ
+}
+
+func (x *ComparisonFilter) GetValue() *v3.RuntimeUInt32 {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// Filters on HTTP response/status code.
+type StatusCodeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison.
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"`
+}
+
+func (x *StatusCodeFilter) Reset() {
+ *x = StatusCodeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusCodeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusCodeFilter) ProtoMessage() {}
+
+func (x *StatusCodeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusCodeFilter.ProtoReflect.Descriptor instead.
+func (*StatusCodeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StatusCodeFilter) GetComparison() *ComparisonFilter {
+ if x != nil {
+ return x.Comparison
+ }
+ return nil
+}
+
+// Filters based on the duration of the request or stream, in milliseconds.
+// For end of stream access logs, the total duration of the stream will be used.
+// For :ref:`periodic access logs`,
+// the duration of the stream at the time of log recording will be used.
+type DurationFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison.
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"`
+}
+
+func (x *DurationFilter) Reset() {
+ *x = DurationFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DurationFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DurationFilter) ProtoMessage() {}
+
+func (x *DurationFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DurationFilter.ProtoReflect.Descriptor instead.
+func (*DurationFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *DurationFilter) GetComparison() *ComparisonFilter {
+ if x != nil {
+ return x.Comparison
+ }
+ return nil
+}
+
+// Filters for requests that are not health check requests. A health check
+// request is marked by the health check filter.
+type NotHealthCheckFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *NotHealthCheckFilter) Reset() {
+ *x = NotHealthCheckFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NotHealthCheckFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotHealthCheckFilter) ProtoMessage() {}
+
+func (x *NotHealthCheckFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotHealthCheckFilter.ProtoReflect.Descriptor instead.
+func (*NotHealthCheckFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5}
+}
+
+// Filters for requests that are traceable. See the tracing overview for more
+// information on how a request becomes traceable.
+type TraceableFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TraceableFilter) Reset() {
+ *x = TraceableFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraceableFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceableFilter) ProtoMessage() {}
+
+func (x *TraceableFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceableFilter.ProtoReflect.Descriptor instead.
+func (*TraceableFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6}
+}
+
+// Filters requests based on runtime-configurable sampling rates.
+type RuntimeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies a key used to look up a custom sampling rate from the runtime configuration. If a value is found for this
+ // key, it will override the default sampling rate specified in “percent_sampled“.
+ RuntimeKey string `protobuf:"bytes,1,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+ // Defines the default sampling percentage when no runtime override is present. If not specified, the default is
+ // **0%** (with a denominator of 100).
+ PercentSampled *v31.FractionalPercent `protobuf:"bytes,2,opt,name=percent_sampled,json=percentSampled,proto3" json:"percent_sampled,omitempty"`
+ // Controls how sampling decisions are made.
+ //
+ // - Default behavior (“false“):
+ //
+ // - Uses the :ref:`x-request-id` as a consistent sampling pivot.
+ // - When :ref:`x-request-id` is present, sampling will be consistent
+ // across multiple hosts based on both the “runtime_key“ and
+ // :ref:`x-request-id`.
+ // - Useful for tracking related requests across a distributed system.
+ //
+ // - When set to “true“ or :ref:`x-request-id` is missing:
+ //
+ // - Sampling decisions are made randomly based only on the “runtime_key“.
+ // - Useful in complex filter configurations (like nested
+ // :ref:`AndFilter`/
+ // :ref:`OrFilter` blocks) where independent probability
+ // calculations are desired.
+ // - Can be used to implement logging kill switches with predictable probability distributions.
+ UseIndependentRandomness bool `protobuf:"varint,3,opt,name=use_independent_randomness,json=useIndependentRandomness,proto3" json:"use_independent_randomness,omitempty"`
+}
+
+func (x *RuntimeFilter) Reset() {
+ *x = RuntimeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFilter) ProtoMessage() {}
+
+func (x *RuntimeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFilter.ProtoReflect.Descriptor instead.
+func (*RuntimeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeFilter) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+func (x *RuntimeFilter) GetPercentSampled() *v31.FractionalPercent {
+ if x != nil {
+ return x.PercentSampled
+ }
+ return nil
+}
+
+func (x *RuntimeFilter) GetUseIndependentRandomness() bool {
+ if x != nil {
+ return x.UseIndependentRandomness
+ }
+ return false
+}
+
+// Performs a logical “and” operation on the result of each filter in filters.
+// Filters are evaluated sequentially and if one of them returns false, the
+// filter returns false immediately.
+type AndFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Filters []*AccessLogFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *AndFilter) Reset() {
+ *x = AndFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AndFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AndFilter) ProtoMessage() {}
+
+func (x *AndFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AndFilter.ProtoReflect.Descriptor instead.
+func (*AndFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *AndFilter) GetFilters() []*AccessLogFilter {
+ if x != nil {
+ return x.Filters
+ }
+ return nil
+}
+
+// Performs a logical “or” operation on the result of each individual filter.
+// Filters are evaluated sequentially and if one of them returns true, the
+// filter returns true immediately.
+type OrFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Filters []*AccessLogFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *OrFilter) Reset() {
+ *x = OrFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrFilter) ProtoMessage() {}
+
+func (x *OrFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrFilter.ProtoReflect.Descriptor instead.
+func (*OrFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *OrFilter) GetFilters() []*AccessLogFilter {
+ if x != nil {
+ return x.Filters
+ }
+ return nil
+}
+
+// Filters requests based on the presence or value of a request header.
+type HeaderFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Only requests with a header which matches the specified HeaderMatcher will
+ // pass the filter check.
+ Header *v32.HeaderMatcher `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+}
+
+func (x *HeaderFilter) Reset() {
+ *x = HeaderFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderFilter) ProtoMessage() {}
+
+func (x *HeaderFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderFilter.ProtoReflect.Descriptor instead.
+func (*HeaderFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HeaderFilter) GetHeader() *v32.HeaderMatcher {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+// Filters requests that received responses with an Envoy response flag set.
+// A list of the response flags can be found
+// in the access log formatter
+// :ref:`documentation`.
+type ResponseFlagFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Only responses with the any of the flags listed in this field will be
+ // logged. This field is optional. If it is not specified, then any response
+ // flag will pass the filter check.
+ Flags []string `protobuf:"bytes,1,rep,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *ResponseFlagFilter) Reset() {
+ *x = ResponseFlagFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResponseFlagFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResponseFlagFilter) ProtoMessage() {}
+
+func (x *ResponseFlagFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResponseFlagFilter.ProtoReflect.Descriptor instead.
+func (*ResponseFlagFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ResponseFlagFilter) GetFlags() []string {
+ if x != nil {
+ return x.Flags
+ }
+ return nil
+}
+
+// Filters gRPC requests based on their response status. If a gRPC status is not
+// provided, the filter will infer the status from the HTTP status code.
+type GrpcStatusFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Logs only responses that have any one of the gRPC statuses in this field.
+ Statuses []GrpcStatusFilter_Status `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.accesslog.v3.GrpcStatusFilter_Status" json:"statuses,omitempty"`
+ // If included and set to true, the filter will instead block all responses
+ // with a gRPC status or inferred gRPC status enumerated in statuses, and
+ // allow all other responses.
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"`
+}
+
+func (x *GrpcStatusFilter) Reset() {
+ *x = GrpcStatusFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcStatusFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcStatusFilter) ProtoMessage() {}
+
+func (x *GrpcStatusFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcStatusFilter.ProtoReflect.Descriptor instead.
+func (*GrpcStatusFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *GrpcStatusFilter) GetStatuses() []GrpcStatusFilter_Status {
+ if x != nil {
+ return x.Statuses
+ }
+ return nil
+}
+
+func (x *GrpcStatusFilter) GetExclude() bool {
+ if x != nil {
+ return x.Exclude
+ }
+ return false
+}
+
+// Filters based on matching dynamic metadata.
+// If the matcher path and key correspond to an existing key in dynamic
+// metadata, the request is logged only if the matcher value is equal to the
+// metadata value. If the matcher path and key *do not* correspond to an
+// existing key in dynamic metadata, the request is logged only if
+// match_if_key_not_found is "true" or unset.
+type MetadataFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Matcher to check metadata for specified value. For example, to match on the
+ // access_log_hint metadata, set the filter to "envoy.common" and the path to
+ // "access_log_hint", and the value to "true".
+ Matcher *v33.MetadataMatcher `protobuf:"bytes,1,opt,name=matcher,proto3" json:"matcher,omitempty"`
+ // Default result if the key does not exist in dynamic metadata: if unset or
+ // true, then log; if false, then don't log.
+ MatchIfKeyNotFound *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"`
+}
+
+func (x *MetadataFilter) Reset() {
+ *x = MetadataFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetadataFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataFilter) ProtoMessage() {}
+
+func (x *MetadataFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataFilter.ProtoReflect.Descriptor instead.
+func (*MetadataFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *MetadataFilter) GetMatcher() *v33.MetadataMatcher {
+ if x != nil {
+ return x.Matcher
+ }
+ return nil
+}
+
+func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.MatchIfKeyNotFound
+ }
+ return nil
+}
+
+// Filters based on access log type.
+type LogTypeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Logs only records which their type is one of the types defined in this field.
+ Types []v34.AccessLogType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"types,omitempty"`
+ // If this field is set to true, the filter will instead block all records
+ // with a access log type in types field, and allow all other records.
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"`
+}
+
+func (x *LogTypeFilter) Reset() {
+ *x = LogTypeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogTypeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogTypeFilter) ProtoMessage() {}
+
+func (x *LogTypeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogTypeFilter.ProtoReflect.Descriptor instead.
+func (*LogTypeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *LogTypeFilter) GetTypes() []v34.AccessLogType {
+ if x != nil {
+ return x.Types
+ }
+ return nil
+}
+
+func (x *LogTypeFilter) GetExclude() bool {
+ if x != nil {
+ return x.Exclude
+ }
+ return false
+}
+
+// Extension filter is statically registered at runtime.
+type ExtensionFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the filter implementation to instantiate. The name must
+ // match a statically registered filter.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Custom configuration that depends on the filter being instantiated.
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *ExtensionFilter_TypedConfig
+ ConfigType isExtensionFilter_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *ExtensionFilter) Reset() {
+ *x = ExtensionFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionFilter) ProtoMessage() {}
+
+func (x *ExtensionFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionFilter.ProtoReflect.Descriptor instead.
+func (*ExtensionFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *ExtensionFilter) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *ExtensionFilter) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isExtensionFilter_ConfigType interface {
+ isExtensionFilter_ConfigType()
+}
+
+type ExtensionFilter_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {}
+
+var File_envoy_config_accesslog_v3_accesslog_proto protoreflect.FileDescriptor
+
+var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72,
+ 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72,
+ 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a,
+ 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a,
+ 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+ 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04,
+ 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0f, 0x41, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x09,
+ 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61,
+ 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10,
+ 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x52, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x17, 0x0a, 0x10,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72,
+ 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x02, 0x6f, 0x70,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01,
+ 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, 0x4f, 0x70, 0x12,
+ 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x01, 0x12,
+ 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72,
+ 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f,
+ 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x38, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64,
+ 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61,
+ 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f,
+ 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, 0x4e, 0x6f, 0x74,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22,
+ 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63,
+ 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x0d,
+ 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a,
+ 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e,
+ 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65,
+ 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x70,
+ 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73,
+ 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d,
+ 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, 0x6e, 0x64, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41,
+ 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x4f, 0x72, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa7, 0x01,
+ 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x90, 0x01,
+ 0xfa, 0x42, 0x8c, 0x01, 0x92, 0x01, 0x88, 0x01, 0x22, 0x85, 0x01, 0x72, 0x82, 0x01, 0x52, 0x02,
+ 0x4c, 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02,
+ 0x55, 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02,
+ 0x4e, 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04,
+ 0x55, 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03,
+ 0x55, 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45,
+ 0x52, 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e,
+ 0x46, 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43,
+ 0x52, 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x02, 0x44, 0x52,
+ 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d,
+ 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75,
+ 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02,
+ 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12,
+ 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d,
+ 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e,
+ 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09,
+ 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41,
+ 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12,
+ 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45,
+ 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17,
+ 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44,
+ 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54,
+ 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52,
+ 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c,
+ 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54,
+ 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41,
+ 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41,
+ 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54,
+ 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f,
+ 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66,
+ 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a,
+ 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48,
+ 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a,
+ 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce sync.Once
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = file_envoy_config_accesslog_v3_accesslog_proto_rawDesc
+)
+
+func file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP() []byte {
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() {
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_accesslog_v3_accesslog_proto_rawDescData)
+ })
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescData
+}
+
+var file_envoy_config_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{
+ (ComparisonFilter_Op)(0), // 0: envoy.config.accesslog.v3.ComparisonFilter.Op
+ (GrpcStatusFilter_Status)(0), // 1: envoy.config.accesslog.v3.GrpcStatusFilter.Status
+ (*AccessLog)(nil), // 2: envoy.config.accesslog.v3.AccessLog
+ (*AccessLogFilter)(nil), // 3: envoy.config.accesslog.v3.AccessLogFilter
+ (*ComparisonFilter)(nil), // 4: envoy.config.accesslog.v3.ComparisonFilter
+ (*StatusCodeFilter)(nil), // 5: envoy.config.accesslog.v3.StatusCodeFilter
+ (*DurationFilter)(nil), // 6: envoy.config.accesslog.v3.DurationFilter
+ (*NotHealthCheckFilter)(nil), // 7: envoy.config.accesslog.v3.NotHealthCheckFilter
+ (*TraceableFilter)(nil), // 8: envoy.config.accesslog.v3.TraceableFilter
+ (*RuntimeFilter)(nil), // 9: envoy.config.accesslog.v3.RuntimeFilter
+ (*AndFilter)(nil), // 10: envoy.config.accesslog.v3.AndFilter
+ (*OrFilter)(nil), // 11: envoy.config.accesslog.v3.OrFilter
+ (*HeaderFilter)(nil), // 12: envoy.config.accesslog.v3.HeaderFilter
+ (*ResponseFlagFilter)(nil), // 13: envoy.config.accesslog.v3.ResponseFlagFilter
+ (*GrpcStatusFilter)(nil), // 14: envoy.config.accesslog.v3.GrpcStatusFilter
+ (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter
+ (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter
+ (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter
+ (*anypb.Any)(nil), // 18: google.protobuf.Any
+ (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32
+ (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent
+ (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher
+ (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher
+ (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue
+ (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType
+}
+var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{
+ 3, // 0: envoy.config.accesslog.v3.AccessLog.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 18, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any
+ 5, // 2: envoy.config.accesslog.v3.AccessLogFilter.status_code_filter:type_name -> envoy.config.accesslog.v3.StatusCodeFilter
+ 6, // 3: envoy.config.accesslog.v3.AccessLogFilter.duration_filter:type_name -> envoy.config.accesslog.v3.DurationFilter
+ 7, // 4: envoy.config.accesslog.v3.AccessLogFilter.not_health_check_filter:type_name -> envoy.config.accesslog.v3.NotHealthCheckFilter
+ 8, // 5: envoy.config.accesslog.v3.AccessLogFilter.traceable_filter:type_name -> envoy.config.accesslog.v3.TraceableFilter
+ 9, // 6: envoy.config.accesslog.v3.AccessLogFilter.runtime_filter:type_name -> envoy.config.accesslog.v3.RuntimeFilter
+ 10, // 7: envoy.config.accesslog.v3.AccessLogFilter.and_filter:type_name -> envoy.config.accesslog.v3.AndFilter
+ 11, // 8: envoy.config.accesslog.v3.AccessLogFilter.or_filter:type_name -> envoy.config.accesslog.v3.OrFilter
+ 12, // 9: envoy.config.accesslog.v3.AccessLogFilter.header_filter:type_name -> envoy.config.accesslog.v3.HeaderFilter
+ 13, // 10: envoy.config.accesslog.v3.AccessLogFilter.response_flag_filter:type_name -> envoy.config.accesslog.v3.ResponseFlagFilter
+ 14, // 11: envoy.config.accesslog.v3.AccessLogFilter.grpc_status_filter:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter
+ 17, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter
+ 15, // 13: envoy.config.accesslog.v3.AccessLogFilter.metadata_filter:type_name -> envoy.config.accesslog.v3.MetadataFilter
+ 16, // 14: envoy.config.accesslog.v3.AccessLogFilter.log_type_filter:type_name -> envoy.config.accesslog.v3.LogTypeFilter
+ 0, // 15: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op
+ 19, // 16: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32
+ 4, // 17: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter
+ 4, // 18: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter
+ 20, // 19: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent
+ 3, // 20: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 3, // 21: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 21, // 22: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher
+ 1, // 23: envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status
+ 22, // 24: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher
+ 23, // 25: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue
+ 24, // 26: envoy.config.accesslog.v3.LogTypeFilter.types:type_name -> envoy.data.accesslog.v3.AccessLogType
+ 18, // 27: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any
+ 28, // [28:28] is the sub-list for method output_type
+ 28, // [28:28] is the sub-list for method input_type
+ 28, // [28:28] is the sub-list for extension type_name
+ 28, // [28:28] is the sub-list for extension extendee
+ 0, // [0:28] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_accesslog_v3_accesslog_proto_init() }
+func file_envoy_config_accesslog_v3_accesslog_proto_init() {
+ if File_envoy_config_accesslog_v3_accesslog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccessLog); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccessLogFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ComparisonFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusCodeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DurationFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NotHealthCheckFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TraceableFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AndFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResponseFlagFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcStatusFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetadataFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogTypeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*AccessLog_TypedConfig)(nil),
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*AccessLogFilter_StatusCodeFilter)(nil),
+ (*AccessLogFilter_DurationFilter)(nil),
+ (*AccessLogFilter_NotHealthCheckFilter)(nil),
+ (*AccessLogFilter_TraceableFilter)(nil),
+ (*AccessLogFilter_RuntimeFilter)(nil),
+ (*AccessLogFilter_AndFilter)(nil),
+ (*AccessLogFilter_OrFilter)(nil),
+ (*AccessLogFilter_HeaderFilter)(nil),
+ (*AccessLogFilter_ResponseFlagFilter)(nil),
+ (*AccessLogFilter_GrpcStatusFilter)(nil),
+ (*AccessLogFilter_ExtensionFilter)(nil),
+ (*AccessLogFilter_MetadataFilter)(nil),
+ (*AccessLogFilter_LogTypeFilter)(nil),
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].OneofWrappers = []interface{}{
+ (*ExtensionFilter_TypedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_accesslog_v3_accesslog_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_accesslog_v3_accesslog_proto_goTypes,
+ DependencyIndexes: file_envoy_config_accesslog_v3_accesslog_proto_depIdxs,
+ EnumInfos: file_envoy_config_accesslog_v3_accesslog_proto_enumTypes,
+ MessageInfos: file_envoy_config_accesslog_v3_accesslog_proto_msgTypes,
+ }.Build()
+ File_envoy_config_accesslog_v3_accesslog_proto = out.File
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = nil
+ file_envoy_config_accesslog_v3_accesslog_proto_goTypes = nil
+ file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go
new file mode 100644
index 000000000..746f6f2c4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go
@@ -0,0 +1,2773 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.AccessLogType(0)
+)
+
+// Validate checks the field values on AccessLog with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *AccessLog) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AccessLog with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AccessLogMultiError, or nil
+// if none found.
+func (m *AccessLog) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AccessLog) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.ConfigType.(type) {
+ case *AccessLog_TypedConfig:
+ if v == nil {
+ err := AccessLogValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return AccessLogMultiError(errors)
+ }
+
+ return nil
+}
+
+// AccessLogMultiError is an error wrapping multiple validation errors returned
+// by AccessLog.ValidateAll() if the designated constraints aren't met.
+type AccessLogMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AccessLogMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AccessLogMultiError) AllErrors() []error { return m }
+
+// AccessLogValidationError is the validation error returned by
+// AccessLog.Validate if the designated constraints aren't met.
+type AccessLogValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AccessLogValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AccessLogValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AccessLogValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AccessLogValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AccessLogValidationError) ErrorName() string { return "AccessLogValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AccessLogValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAccessLog.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AccessLogValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AccessLogValidationError{}
+
+// Validate checks the field values on AccessLogFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *AccessLogFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AccessLogFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// AccessLogFilterMultiError, or nil if none found.
+func (m *AccessLogFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AccessLogFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofFilterSpecifierPresent := false
+ switch v := m.FilterSpecifier.(type) {
+ case *AccessLogFilter_StatusCodeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStatusCodeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatusCodeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_DurationFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetDurationFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDurationFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_NotHealthCheckFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetNotHealthCheckFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNotHealthCheckFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_TraceableFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetTraceableFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTraceableFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_RuntimeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetRuntimeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRuntimeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_AndFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAndFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAndFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_OrFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetOrFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOrFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_HeaderFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetHeaderFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeaderFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_ResponseFlagFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetResponseFlagFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResponseFlagFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_GrpcStatusFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGrpcStatusFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcStatusFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_ExtensionFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetExtensionFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExtensionFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_MetadataFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetMetadataFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMetadataFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_LogTypeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLogTypeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLogTypeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofFilterSpecifierPresent {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AccessLogFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// AccessLogFilterMultiError is an error wrapping multiple validation errors
+// returned by AccessLogFilter.ValidateAll() if the designated constraints
+// aren't met.
+type AccessLogFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AccessLogFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AccessLogFilterMultiError) AllErrors() []error { return m }
+
+// AccessLogFilterValidationError is the validation error returned by
+// AccessLogFilter.Validate if the designated constraints aren't met.
+type AccessLogFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AccessLogFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AccessLogFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AccessLogFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AccessLogFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AccessLogFilterValidationError) ErrorName() string { return "AccessLogFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AccessLogFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAccessLogFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AccessLogFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AccessLogFilterValidationError{}
+
+// Validate checks the field values on ComparisonFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ComparisonFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ComparisonFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ComparisonFilterMultiError, or nil if none found.
+func (m *ComparisonFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ComparisonFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ComparisonFilter_Op_name[int32(m.GetOp())]; !ok {
+ err := ComparisonFilterValidationError{
+ field: "Op",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetValue() == nil {
+ err := ComparisonFilterValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ComparisonFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ComparisonFilterMultiError is an error wrapping multiple validation errors
+// returned by ComparisonFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ComparisonFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ComparisonFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ComparisonFilterMultiError) AllErrors() []error { return m }
+
+// ComparisonFilterValidationError is the validation error returned by
+// ComparisonFilter.Validate if the designated constraints aren't met.
+type ComparisonFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ComparisonFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ComparisonFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ComparisonFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ComparisonFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ComparisonFilterValidationError) ErrorName() string { return "ComparisonFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ComparisonFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sComparisonFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ComparisonFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ComparisonFilterValidationError{}
+
+// Validate checks the field values on StatusCodeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusCodeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusCodeFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusCodeFilterMultiError, or nil if none found.
+func (m *StatusCodeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusCodeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetComparison() == nil {
+ err := StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetComparison()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return StatusCodeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusCodeFilterMultiError is an error wrapping multiple validation errors
+// returned by StatusCodeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type StatusCodeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusCodeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusCodeFilterMultiError) AllErrors() []error { return m }
+
+// StatusCodeFilterValidationError is the validation error returned by
+// StatusCodeFilter.Validate if the designated constraints aren't met.
+type StatusCodeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusCodeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusCodeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusCodeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusCodeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusCodeFilterValidationError) ErrorName() string { return "StatusCodeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusCodeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusCodeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusCodeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusCodeFilterValidationError{}
+
+// Validate checks the field values on DurationFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *DurationFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DurationFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in DurationFilterMultiError,
+// or nil if none found.
+func (m *DurationFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DurationFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetComparison() == nil {
+ err := DurationFilterValidationError{
+ field: "Comparison",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetComparison()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return DurationFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// DurationFilterMultiError is an error wrapping multiple validation errors
+// returned by DurationFilter.ValidateAll() if the designated constraints
+// aren't met.
+type DurationFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DurationFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DurationFilterMultiError) AllErrors() []error { return m }
+
+// DurationFilterValidationError is the validation error returned by
+// DurationFilter.Validate if the designated constraints aren't met.
+type DurationFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DurationFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DurationFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DurationFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DurationFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DurationFilterValidationError) ErrorName() string { return "DurationFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DurationFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDurationFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DurationFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DurationFilterValidationError{}
+
+// Validate checks the field values on NotHealthCheckFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *NotHealthCheckFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NotHealthCheckFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// NotHealthCheckFilterMultiError, or nil if none found.
+func (m *NotHealthCheckFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NotHealthCheckFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return NotHealthCheckFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// NotHealthCheckFilterMultiError is an error wrapping multiple validation
+// errors returned by NotHealthCheckFilter.ValidateAll() if the designated
+// constraints aren't met.
+type NotHealthCheckFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NotHealthCheckFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NotHealthCheckFilterMultiError) AllErrors() []error { return m }
+
+// NotHealthCheckFilterValidationError is the validation error returned by
+// NotHealthCheckFilter.Validate if the designated constraints aren't met.
+type NotHealthCheckFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NotHealthCheckFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NotHealthCheckFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NotHealthCheckFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NotHealthCheckFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NotHealthCheckFilterValidationError) ErrorName() string {
+ return "NotHealthCheckFilterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e NotHealthCheckFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNotHealthCheckFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NotHealthCheckFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NotHealthCheckFilterValidationError{}
+
+// Validate checks the field values on TraceableFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *TraceableFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TraceableFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TraceableFilterMultiError, or nil if none found.
+func (m *TraceableFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TraceableFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return TraceableFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// TraceableFilterMultiError is an error wrapping multiple validation errors
+// returned by TraceableFilter.ValidateAll() if the designated constraints
+// aren't met.
+type TraceableFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TraceableFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TraceableFilterMultiError) AllErrors() []error { return m }
+
+// TraceableFilterValidationError is the validation error returned by
+// TraceableFilter.Validate if the designated constraints aren't met.
+type TraceableFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TraceableFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TraceableFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TraceableFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TraceableFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TraceableFilterValidationError) ErrorName() string { return "TraceableFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TraceableFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTraceableFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TraceableFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TraceableFilterValidationError{}
+
+// Validate checks the field values on RuntimeFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RuntimeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RuntimeFilterMultiError, or
+// nil if none found.
+func (m *RuntimeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ err := RuntimeFilterValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetPercentSampled()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPercentSampled()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for UseIndependentRandomness
+
+ if len(errors) > 0 {
+ return RuntimeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeFilterMultiError is an error wrapping multiple validation errors
+// returned by RuntimeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type RuntimeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeFilterMultiError) AllErrors() []error { return m }
+
+// RuntimeFilterValidationError is the validation error returned by
+// RuntimeFilter.Validate if the designated constraints aren't met.
+type RuntimeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeFilterValidationError) ErrorName() string { return "RuntimeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeFilterValidationError{}
+
+// Validate checks the field values on AndFilter with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *AndFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AndFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AndFilterMultiError, or nil
+// if none found.
+func (m *AndFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AndFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetFilters()) < 2 {
+ err := AndFilterValidationError{
+ field: "Filters",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return AndFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// AndFilterMultiError is an error wrapping multiple validation errors returned
+// by AndFilter.ValidateAll() if the designated constraints aren't met.
+type AndFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AndFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AndFilterMultiError) AllErrors() []error { return m }
+
+// AndFilterValidationError is the validation error returned by
+// AndFilter.Validate if the designated constraints aren't met.
+type AndFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AndFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AndFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AndFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AndFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AndFilterValidationError) ErrorName() string { return "AndFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AndFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAndFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AndFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AndFilterValidationError{}
+
+// Validate checks the field values on OrFilter with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *OrFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in OrFilterMultiError, or nil
+// if none found.
+func (m *OrFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetFilters()) < 2 {
+ err := OrFilterValidationError{
+ field: "Filters",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return OrFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrFilterMultiError is an error wrapping multiple validation errors returned
+// by OrFilter.ValidateAll() if the designated constraints aren't met.
+type OrFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrFilterMultiError) AllErrors() []error { return m }
+
+// OrFilterValidationError is the validation error returned by
+// OrFilter.Validate if the designated constraints aren't met.
+type OrFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrFilterValidationError) ErrorName() string { return "OrFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e OrFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrFilterValidationError{}
+
+// Validate checks the field values on HeaderFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HeaderFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HeaderFilterMultiError, or
+// nil if none found.
+func (m *HeaderFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetHeader() == nil {
+ err := HeaderFilterValidationError{
+ field: "Header",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHeader()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return HeaderFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderFilterMultiError is an error wrapping multiple validation errors
+// returned by HeaderFilter.ValidateAll() if the designated constraints aren't met.
+type HeaderFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderFilterMultiError) AllErrors() []error { return m }
+
+// HeaderFilterValidationError is the validation error returned by
+// HeaderFilter.Validate if the designated constraints aren't met.
+type HeaderFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderFilterValidationError) ErrorName() string { return "HeaderFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderFilterValidationError{}
+
+// Validate checks the field values on ResponseFlagFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResponseFlagFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResponseFlagFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResponseFlagFilterMultiError, or nil if none found.
+func (m *ResponseFlagFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResponseFlagFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetFlags() {
+ _, _ = idx, item
+
+ if _, ok := _ResponseFlagFilter_Flags_InLookup[item]; !ok {
+ err := ResponseFlagFilterValidationError{
+ field: fmt.Sprintf("Flags[%v]", idx),
+ reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO DR]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ResponseFlagFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResponseFlagFilterMultiError is an error wrapping multiple validation errors
+// returned by ResponseFlagFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ResponseFlagFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResponseFlagFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResponseFlagFilterMultiError) AllErrors() []error { return m }
+
+// ResponseFlagFilterValidationError is the validation error returned by
+// ResponseFlagFilter.Validate if the designated constraints aren't met.
+type ResponseFlagFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResponseFlagFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResponseFlagFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResponseFlagFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResponseFlagFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResponseFlagFilterValidationError) ErrorName() string {
+ return "ResponseFlagFilterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResponseFlagFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResponseFlagFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResponseFlagFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResponseFlagFilterValidationError{}
+
+var _ResponseFlagFilter_Flags_InLookup = map[string]struct{}{
+ "LH": {},
+ "UH": {},
+ "UT": {},
+ "LR": {},
+ "UR": {},
+ "UF": {},
+ "UC": {},
+ "UO": {},
+ "NR": {},
+ "DI": {},
+ "FI": {},
+ "RL": {},
+ "UAEX": {},
+ "RLSE": {},
+ "DC": {},
+ "URX": {},
+ "SI": {},
+ "IH": {},
+ "DPE": {},
+ "UMSDR": {},
+ "RFCF": {},
+ "NFCF": {},
+ "DT": {},
+ "UPE": {},
+ "NC": {},
+ "OM": {},
+ "DF": {},
+ "DO": {},
+ "DR": {},
+}
+
+// Validate checks the field values on GrpcStatusFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *GrpcStatusFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcStatusFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcStatusFilterMultiError, or nil if none found.
+func (m *GrpcStatusFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcStatusFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStatuses() {
+ _, _ = idx, item
+
+ if _, ok := GrpcStatusFilter_Status_name[int32(item)]; !ok {
+ err := GrpcStatusFilterValidationError{
+ field: fmt.Sprintf("Statuses[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for Exclude
+
+ if len(errors) > 0 {
+ return GrpcStatusFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcStatusFilterMultiError is an error wrapping multiple validation errors
+// returned by GrpcStatusFilter.ValidateAll() if the designated constraints
+// aren't met.
+type GrpcStatusFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcStatusFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcStatusFilterMultiError) AllErrors() []error { return m }
+
+// GrpcStatusFilterValidationError is the validation error returned by
+// GrpcStatusFilter.Validate if the designated constraints aren't met.
+type GrpcStatusFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcStatusFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcStatusFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcStatusFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcStatusFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcStatusFilterValidationError) ErrorName() string { return "GrpcStatusFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcStatusFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcStatusFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcStatusFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcStatusFilterValidationError{}
+
+// Validate checks the field values on MetadataFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *MetadataFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MetadataFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in MetadataFilterMultiError,
+// or nil if none found.
+func (m *MetadataFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MetadataFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatchIfKeyNotFound()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatchIfKeyNotFound()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return MetadataFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// MetadataFilterMultiError is an error wrapping multiple validation errors
+// returned by MetadataFilter.ValidateAll() if the designated constraints
+// aren't met.
+type MetadataFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MetadataFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MetadataFilterMultiError) AllErrors() []error { return m }
+
+// MetadataFilterValidationError is the validation error returned by
+// MetadataFilter.Validate if the designated constraints aren't met.
+type MetadataFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MetadataFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MetadataFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MetadataFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MetadataFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MetadataFilterValidationError) ErrorName() string { return "MetadataFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MetadataFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMetadataFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MetadataFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MetadataFilterValidationError{}
+
+// Validate checks the field values on LogTypeFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *LogTypeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on LogTypeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LogTypeFilterMultiError, or
+// nil if none found.
+func (m *LogTypeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *LogTypeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetTypes() {
+ _, _ = idx, item
+
+ if _, ok := v3.AccessLogType_name[int32(item)]; !ok {
+ err := LogTypeFilterValidationError{
+ field: fmt.Sprintf("Types[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for Exclude
+
+ if len(errors) > 0 {
+ return LogTypeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// LogTypeFilterMultiError is an error wrapping multiple validation errors
+// returned by LogTypeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type LogTypeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LogTypeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LogTypeFilterMultiError) AllErrors() []error { return m }
+
+// LogTypeFilterValidationError is the validation error returned by
+// LogTypeFilter.Validate if the designated constraints aren't met.
+type LogTypeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LogTypeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LogTypeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LogTypeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LogTypeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LogTypeFilterValidationError) ErrorName() string { return "LogTypeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LogTypeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLogTypeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LogTypeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LogTypeFilterValidationError{}
+
+// Validate checks the field values on ExtensionFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ExtensionFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ExtensionFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ExtensionFilterMultiError, or nil if none found.
+func (m *ExtensionFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ExtensionFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ switch v := m.ConfigType.(type) {
+ case *ExtensionFilter_TypedConfig:
+ if v == nil {
+ err := ExtensionFilterValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return ExtensionFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ExtensionFilterMultiError is an error wrapping multiple validation errors
+// returned by ExtensionFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ExtensionFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ExtensionFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ExtensionFilterMultiError) AllErrors() []error { return m }
+
+// ExtensionFilterValidationError is the validation error returned by
+// ExtensionFilter.Validate if the designated constraints aren't met.
+type ExtensionFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtensionFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionFilterValidationError) ErrorName() string { return "ExtensionFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ExtensionFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtensionFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtensionFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtensionFilterValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
new file mode 100644
index 000000000..e75bf014a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
@@ -0,0 +1,1751 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *AccessLog) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLog) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.ConfigType.(*AccessLog_TypedConfig); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if m.Filter != nil {
+ size, err := m.Filter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLog_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLog_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TypedConfig != nil {
+ size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLogFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_LogTypeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_MetadataFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ExtensionFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_GrpcStatusFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ResponseFlagFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_HeaderFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_OrFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_AndFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_RuntimeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_TraceableFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_NotHealthCheckFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_DurationFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_StatusCodeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.StatusCodeFilter != nil {
+ size, err := m.StatusCodeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DurationFilter != nil {
+ size, err := m.DurationFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NotHealthCheckFilter != nil {
+ size, err := m.NotHealthCheckFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TraceableFilter != nil {
+ size, err := m.TraceableFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RuntimeFilter != nil {
+ size, err := m.RuntimeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x2a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AndFilter != nil {
+ size, err := m.AndFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x32
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.OrFilter != nil {
+ size, err := m.OrFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x3a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.HeaderFilter != nil {
+ size, err := m.HeaderFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x42
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponseFlagFilter != nil {
+ size, err := m.ResponseFlagFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x4a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x4a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.GrpcStatusFilter != nil {
+ size, err := m.GrpcStatusFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x52
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x52
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ExtensionFilter != nil {
+ size, err := m.ExtensionFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x5a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x5a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.MetadataFilter != nil {
+ size, err := m.MetadataFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x62
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x62
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.LogTypeFilter != nil {
+ size, err := m.LogTypeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x6a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x6a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ComparisonFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ComparisonFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ComparisonFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Value != nil {
+ if vtmsg, ok := interface{}(m.Value).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Value)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Op != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Op))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatusCodeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Comparison != nil {
+ size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DurationFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Comparison != nil {
+ size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NotHealthCheckFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TraceableFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.UseIndependentRandomness {
+ i--
+ if m.UseIndependentRandomness {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.PercentSampled != nil {
+ if vtmsg, ok := interface{}(m.PercentSampled).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.PercentSampled)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.RuntimeKey) > 0 {
+ i -= len(m.RuntimeKey)
+ copy(dAtA[i:], m.RuntimeKey)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AndFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Filters) > 0 {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OrFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Filters) > 0 {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HeaderFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Header != nil {
+ if vtmsg, ok := interface{}(m.Header).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Header)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResponseFlagFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Flags) > 0 {
+ for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Flags[iNdEx])
+ copy(dAtA[i:], m.Flags[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Flags[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GrpcStatusFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Exclude {
+ i--
+ if m.Exclude {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Statuses) > 0 {
+ var pksize2 int
+ for _, num := range m.Statuses {
+ pksize2 += protohelpers.SizeOfVarint(uint64(num))
+ }
+ i -= pksize2
+ j1 := i
+ for _, num1 := range m.Statuses {
+ num := uint64(num1)
+ for num >= 1<<7 {
+ dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j1++
+ }
+ dAtA[j1] = uint8(num)
+ j1++
+ }
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MetadataFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MatchIfKeyNotFound != nil {
+ size, err := (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Matcher != nil {
+ if vtmsg, ok := interface{}(m.Matcher).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Matcher)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LogTypeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Exclude {
+ i--
+ if m.Exclude {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Types) > 0 {
+ var pksize2 int
+ for _, num := range m.Types {
+ pksize2 += protohelpers.SizeOfVarint(uint64(num))
+ }
+ i -= pksize2
+ j1 := i
+ for _, num1 := range m.Types {
+ num := uint64(num1)
+ for num >= 1<<7 {
+ dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j1++
+ }
+ dAtA[j1] = uint8(num)
+ j1++
+ }
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ExtensionFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.ConfigType.(*ExtensionFilter_TypedConfig); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ExtensionFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ExtensionFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TypedConfig != nil {
+ size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLog) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Filter != nil {
+ l = m.Filter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AccessLog_TypedConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TypedConfig != nil {
+ l = (*anypb.Any)(m.TypedConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.FilterSpecifier.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.StatusCodeFilter != nil {
+ l = m.StatusCodeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_DurationFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DurationFilter != nil {
+ l = m.DurationFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_NotHealthCheckFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NotHealthCheckFilter != nil {
+ l = m.NotHealthCheckFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_TraceableFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TraceableFilter != nil {
+ l = m.TraceableFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_RuntimeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RuntimeFilter != nil {
+ l = m.RuntimeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_AndFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AndFilter != nil {
+ l = m.AndFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_OrFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.OrFilter != nil {
+ l = m.OrFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_HeaderFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.HeaderFilter != nil {
+ l = m.HeaderFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_ResponseFlagFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponseFlagFilter != nil {
+ l = m.ResponseFlagFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_GrpcStatusFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.GrpcStatusFilter != nil {
+ l = m.GrpcStatusFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_ExtensionFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ExtensionFilter != nil {
+ l = m.ExtensionFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_MetadataFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MetadataFilter != nil {
+ l = m.MetadataFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_LogTypeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LogTypeFilter != nil {
+ l = m.LogTypeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *ComparisonFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Op != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Op))
+ }
+ if m.Value != nil {
+ if size, ok := interface{}(m.Value).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Value)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *StatusCodeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Comparison != nil {
+ l = m.Comparison.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *DurationFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Comparison != nil {
+ l = m.Comparison.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *NotHealthCheckFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *TraceableFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.RuntimeKey)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.PercentSampled != nil {
+ if size, ok := interface{}(m.PercentSampled).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.PercentSampled)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UseIndependentRandomness {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AndFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filters) > 0 {
+ for _, e := range m.Filters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *OrFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filters) > 0 {
+ for _, e := range m.Filters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HeaderFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ if size, ok := interface{}(m.Header).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Header)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ResponseFlagFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Flags) > 0 {
+ for _, s := range m.Flags {
+ l = len(s)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GrpcStatusFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Statuses) > 0 {
+ l = 0
+ for _, e := range m.Statuses {
+ l += protohelpers.SizeOfVarint(uint64(e))
+ }
+ n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l
+ }
+ if m.Exclude {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *MetadataFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Matcher != nil {
+ if size, ok := interface{}(m.Matcher).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Matcher)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MatchIfKeyNotFound != nil {
+ l = (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *LogTypeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Types) > 0 {
+ l = 0
+ for _, e := range m.Types {
+ l += protohelpers.SizeOfVarint(uint64(e))
+ }
+ n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l
+ }
+ if m.Exclude {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ExtensionFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ExtensionFilter_TypedConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TypedConfig != nil {
+ l = (*anypb.Any)(m.TypedConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go
new file mode 100644
index 000000000..a8522cc1a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go
@@ -0,0 +1,3310 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
+ v37 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v36 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
+ v38 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
+ v35 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.
+// Within an event type, actions execute in the order they are configured.
+// For KILL/MULTIKILL there is a default PANIC that will run after the
+// registered actions and kills the process if it wasn't already killed.
+// It might be useful to specify several debug actions, and possibly an
+// alternate FATAL action.
+type Watchdog_WatchdogAction_WatchdogEvent int32
+
+const (
+ Watchdog_WatchdogAction_UNKNOWN Watchdog_WatchdogAction_WatchdogEvent = 0
+ Watchdog_WatchdogAction_KILL Watchdog_WatchdogAction_WatchdogEvent = 1
+ Watchdog_WatchdogAction_MULTIKILL Watchdog_WatchdogAction_WatchdogEvent = 2
+ Watchdog_WatchdogAction_MEGAMISS Watchdog_WatchdogAction_WatchdogEvent = 3
+ Watchdog_WatchdogAction_MISS Watchdog_WatchdogAction_WatchdogEvent = 4
+)
+
+// Enum value maps for Watchdog_WatchdogAction_WatchdogEvent.
+var (
+ Watchdog_WatchdogAction_WatchdogEvent_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "KILL",
+ 2: "MULTIKILL",
+ 3: "MEGAMISS",
+ 4: "MISS",
+ }
+ Watchdog_WatchdogAction_WatchdogEvent_value = map[string]int32{
+ "UNKNOWN": 0,
+ "KILL": 1,
+ "MULTIKILL": 2,
+ "MEGAMISS": 3,
+ "MISS": 4,
+ }
+)
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) Enum() *Watchdog_WatchdogAction_WatchdogEvent {
+ p := new(Watchdog_WatchdogAction_WatchdogEvent)
+ *p = x
+ return p
+}
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Watchdog_WatchdogAction_WatchdogEvent) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0].Descriptor()
+}
+
+func (Watchdog_WatchdogAction_WatchdogEvent) Type() protoreflect.EnumType {
+ return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0]
+}
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Watchdog_WatchdogAction_WatchdogEvent.Descriptor instead.
+func (Watchdog_WatchdogAction_WatchdogEvent) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0, 0}
+}
+
+type CustomInlineHeader_InlineHeaderType int32
+
+const (
+ CustomInlineHeader_REQUEST_HEADER CustomInlineHeader_InlineHeaderType = 0
+ CustomInlineHeader_REQUEST_TRAILER CustomInlineHeader_InlineHeaderType = 1
+ CustomInlineHeader_RESPONSE_HEADER CustomInlineHeader_InlineHeaderType = 2
+ CustomInlineHeader_RESPONSE_TRAILER CustomInlineHeader_InlineHeaderType = 3
+)
+
+// Enum value maps for CustomInlineHeader_InlineHeaderType.
+var (
+ CustomInlineHeader_InlineHeaderType_name = map[int32]string{
+ 0: "REQUEST_HEADER",
+ 1: "REQUEST_TRAILER",
+ 2: "RESPONSE_HEADER",
+ 3: "RESPONSE_TRAILER",
+ }
+ CustomInlineHeader_InlineHeaderType_value = map[string]int32{
+ "REQUEST_HEADER": 0,
+ "REQUEST_TRAILER": 1,
+ "RESPONSE_HEADER": 2,
+ "RESPONSE_TRAILER": 3,
+ }
+)
+
+func (x CustomInlineHeader_InlineHeaderType) Enum() *CustomInlineHeader_InlineHeaderType {
+ p := new(CustomInlineHeader_InlineHeaderType)
+ *p = x
+ return p
+}
+
+func (x CustomInlineHeader_InlineHeaderType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CustomInlineHeader_InlineHeaderType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1].Descriptor()
+}
+
+func (CustomInlineHeader_InlineHeaderType) Type() protoreflect.EnumType {
+ return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1]
+}
+
+func (x CustomInlineHeader_InlineHeaderType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CustomInlineHeader_InlineHeaderType.Descriptor instead.
+func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9, 0}
+}
+
+// Bootstrap :ref:`configuration overview `.
+// [#next-free-field: 42]
+type Bootstrap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Node identity to present to the management server and for instance
+ // identification purposes (e.g. in generated headers).
+ Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of :ref:`Node ` field names
+ // that will be included in the context parameters of the effective
+ // xdstp:// URL that is sent in a discovery request when resource
+ // locators are used for LDS/CDS. Any non-string field will have its JSON
+ // encoding set as the context parameter value, with the exception of
+ // metadata, which will be flattened (see example below). The supported field
+ // names are:
+ // - "cluster"
+ // - "id"
+ // - "locality.region"
+ // - "locality.sub_zone"
+ // - "locality.zone"
+ // - "metadata"
+ // - "user_agent_build_version.metadata"
+ // - "user_agent_build_version.version"
+ // - "user_agent_name"
+ // - "user_agent_version"
+ //
+ // The node context parameters act as a base layer dictionary for the context
+ // parameters (i.e. more specific resource specific context parameters will
+ // override). Field names will be prefixed with “udpa.node.” when included in
+ // context parameters.
+ //
+ // For example, if node_context_params is “["user_agent_name", "metadata"]“,
+ // the implied context parameters might be::
+ //
+ // node.user_agent_name: "envoy"
+ // node.metadata.foo: "{\"bar\": \"baz\"}"
+ // node.metadata.some: "42"
+ // node.metadata.thing: "\"thing\""
+ //
+ // [#not-implemented-hide:]
+ NodeContextParams []string `protobuf:"bytes,26,rep,name=node_context_params,json=nodeContextParams,proto3" json:"node_context_params,omitempty"`
+ // Statically specified resources.
+ StaticResources *Bootstrap_StaticResources `protobuf:"bytes,2,opt,name=static_resources,json=staticResources,proto3" json:"static_resources,omitempty"`
+ // xDS configuration sources.
+ DynamicResources *Bootstrap_DynamicResources `protobuf:"bytes,3,opt,name=dynamic_resources,json=dynamicResources,proto3" json:"dynamic_resources,omitempty"`
+ // Configuration for the cluster manager which owns all upstream clusters
+ // within the server.
+ ClusterManager *ClusterManager `protobuf:"bytes,4,opt,name=cluster_manager,json=clusterManager,proto3" json:"cluster_manager,omitempty"`
+ // Health discovery service config option.
+ // (:ref:`core.ApiConfigSource `)
+ HdsConfig *v3.ApiConfigSource `protobuf:"bytes,14,opt,name=hds_config,json=hdsConfig,proto3" json:"hds_config,omitempty"`
+ // Optional file system path to search for startup flag files.
+ FlagsPath string `protobuf:"bytes,5,opt,name=flags_path,json=flagsPath,proto3" json:"flags_path,omitempty"`
+ // Optional set of stats sinks.
+ StatsSinks []*v31.StatsSink `protobuf:"bytes,6,rep,name=stats_sinks,json=statsSinks,proto3" json:"stats_sinks,omitempty"`
+ // Options to control behaviors of deferred creation compatible stats.
+ DeferredStatOptions *Bootstrap_DeferredStatOptions `protobuf:"bytes,39,opt,name=deferred_stat_options,json=deferredStatOptions,proto3" json:"deferred_stat_options,omitempty"`
+ // Configuration for internal processing of stats.
+ StatsConfig *v31.StatsConfig `protobuf:"bytes,13,opt,name=stats_config,json=statsConfig,proto3" json:"stats_config,omitempty"`
+ // Optional duration between flushes to configured stats sinks. For
+ // performance reasons Envoy latches counters and only flushes counters and
+ // gauges at a periodic interval. If not specified the default is 5000ms (5
+ // seconds). Only one of “stats_flush_interval“ or “stats_flush_on_admin“
+ // can be set.
+ // Duration must be at least 1ms and at most 5 min.
+ StatsFlushInterval *durationpb.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"`
+ // Types that are assignable to StatsFlush:
+ //
+ // *Bootstrap_StatsFlushOnAdmin
+ StatsFlush isBootstrap_StatsFlush `protobuf_oneof:"stats_flush"`
+ // Optional watchdog configuration.
+ // This is for a single watchdog configuration for the entire system.
+ // Deprecated in favor of “watchdogs“ which has finer granularity.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ Watchdog *Watchdog `protobuf:"bytes,8,opt,name=watchdog,proto3" json:"watchdog,omitempty"`
+ // Optional watchdogs configuration.
+ // This is used for specifying different watchdogs for the different subsystems.
+ // [#extension-category: envoy.guarddog_actions]
+ Watchdogs *Watchdogs `protobuf:"bytes,27,opt,name=watchdogs,proto3" json:"watchdogs,omitempty"`
+ // Configuration for an external tracing provider.
+ //
+ // .. attention::
+ //
+ // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider
+ // `.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ Tracing *v32.Tracing `protobuf:"bytes,9,opt,name=tracing,proto3" json:"tracing,omitempty"`
+ // Configuration for the runtime configuration provider. If not
+ // specified, a “null” provider will be used which will result in all defaults
+ // being used.
+ LayeredRuntime *LayeredRuntime `protobuf:"bytes,17,opt,name=layered_runtime,json=layeredRuntime,proto3" json:"layered_runtime,omitempty"`
+ // Configuration for the local administration HTTP server.
+ Admin *Admin `protobuf:"bytes,12,opt,name=admin,proto3" json:"admin,omitempty"`
+ // Optional overload manager configuration.
+ OverloadManager *v33.OverloadManager `protobuf:"bytes,15,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"`
+ // Enable :ref:`stats for event dispatcher `, defaults to false.
+ // Note that this records a value for each iteration of the event loop on every thread. This
+ // should normally be minimal overhead, but when using
+ // :ref:`statsd `, it will send each observed value
+ // over the wire individually because the statsd protocol doesn't have any way to represent a
+ // histogram summary. Be aware that this can be a very large volume of data.
+ EnableDispatcherStats bool `protobuf:"varint,16,opt,name=enable_dispatcher_stats,json=enableDispatcherStats,proto3" json:"enable_dispatcher_stats,omitempty"`
+ // Optional string which will be used in lieu of x-envoy in prefixing headers.
+ //
+ // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be
+ // transformed into x-foo-retry-on etc.
+ //
+ // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the
+ // headers Envoy will trust for core code and core extensions only. Be VERY careful making
+ // changes to this string, especially in multi-layer Envoy deployments or deployments using
+ // extensions which are not upstream.
+ HeaderPrefix string `protobuf:"bytes,18,opt,name=header_prefix,json=headerPrefix,proto3" json:"header_prefix,omitempty"`
+ // Optional proxy version which will be used to set the value of :ref:`server.version statistic
+ // ` if specified. Envoy will not process this value, it will be sent as is to
+ // :ref:`stats sinks `.
+ StatsServerVersionOverride *wrapperspb.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"`
+ // Always use TCP queries instead of UDP queries for DNS lookups.
+ // This may be overridden on a per-cluster basis in cds_config,
+ // when :ref:`dns_resolvers ` and
+ // :ref:`use_tcp_for_dns_lookups ` are
+ // specified.
+ // This field is deprecated in favor of “dns_resolution_config“
+ // which aggregates all of the DNS resolver configuration in a single message.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ UseTcpForDnsLookups bool `protobuf:"varint,20,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"`
+ // DNS resolution configuration which includes the underlying dns resolver addresses and options.
+ // This may be overridden on a per-cluster basis in cds_config, when
+ // :ref:`dns_resolution_config `
+ // is specified.
+ // This field is deprecated in favor of
+ // :ref:`typed_dns_resolver_config `.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ DnsResolutionConfig *v3.DnsResolutionConfig `protobuf:"bytes,30,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"`
+ // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple,
+ // or any other DNS resolver types and the related parameters.
+ // For example, an object of
+ // :ref:`CaresDnsResolverConfig `
+ // can be packed into this “typed_dns_resolver_config“. This configuration replaces the
+ // :ref:`dns_resolution_config `
+ // configuration.
+ // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists,
+ // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“.
+ // When “typed_dns_resolver_config“ is missing, the default behavior is in place.
+ // [#extension-category: envoy.network.dns_resolver]
+ TypedDnsResolverConfig *v3.TypedExtensionConfig `protobuf:"bytes,31,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"`
+ // Specifies optional bootstrap extensions to be instantiated at startup time.
+ // Each item contains extension specific configuration.
+ // [#extension-category: envoy.bootstrap]
+ BootstrapExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,21,rep,name=bootstrap_extensions,json=bootstrapExtensions,proto3" json:"bootstrap_extensions,omitempty"`
+ // Specifies optional extensions instantiated at startup time and
+ // invoked during crash time on the request that caused the crash.
+ FatalActions []*FatalAction `protobuf:"bytes,28,rep,name=fatal_actions,json=fatalActions,proto3" json:"fatal_actions,omitempty"`
+ // Configuration sources that will participate in
+ // xdstp:// URL authority resolution. The algorithm is as
+ // follows:
+ // 1. The authority field is taken from the xdstp:// URL, call
+ // this “resource_authority“.
+ // 2. “resource_authority“ is compared against the authorities in any peer
+ // “ConfigSource“. The peer “ConfigSource“ is the configuration source
+ // message which would have been used unconditionally for resolution
+ // with opaque resource names. If there is a match with an authority, the
+ // peer “ConfigSource“ message is used.
+ // 3. “resource_authority“ is compared sequentially with the authorities in
+ // each configuration source in “config_sources“. The first “ConfigSource“
+ // to match wins.
+ // 4. As a fallback, if no configuration source matches, then
+ // “default_config_source“ is used.
+ // 5. If “default_config_source“ is not specified, resolution fails.
+ //
+ // [#not-implemented-hide:]
+ ConfigSources []*v3.ConfigSource `protobuf:"bytes,22,rep,name=config_sources,json=configSources,proto3" json:"config_sources,omitempty"`
+ // Default configuration source for xdstp:// URLs if all
+ // other resolution fails.
+ // [#not-implemented-hide:]
+ DefaultConfigSource *v3.ConfigSource `protobuf:"bytes,23,opt,name=default_config_source,json=defaultConfigSource,proto3" json:"default_config_source,omitempty"`
+ // Optional overriding of default socket interface. The value must be the name of one of the
+ // socket interface factories initialized through a bootstrap extension
+ DefaultSocketInterface string `protobuf:"bytes,24,opt,name=default_socket_interface,json=defaultSocketInterface,proto3" json:"default_socket_interface,omitempty"`
+ // Global map of CertificateProvider instances. These instances are referred to by name in the
+ // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name
+ // `
+ // field.
+ // [#not-implemented-hide:]
+ CertificateProviderInstances map[string]*v3.TypedExtensionConfig `protobuf:"bytes,25,rep,name=certificate_provider_instances,json=certificateProviderInstances,proto3" json:"certificate_provider_instances,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Specifies a set of headers that need to be registered as inline header. This configuration
+ // allows users to customize the inline headers on-demand at Envoy startup without modifying
+ // Envoy's source code.
+ //
+ // Note that the 'set-cookie' header cannot be registered as inline header.
+ InlineHeaders []*CustomInlineHeader `protobuf:"bytes,32,rep,name=inline_headers,json=inlineHeaders,proto3" json:"inline_headers,omitempty"`
+ // Optional path to a file with performance tracing data created by "Perfetto" SDK in binary
+ // ProtoBuf format. The default value is "envoy.pftrace".
+ PerfTracingFilePath string `protobuf:"bytes,33,opt,name=perf_tracing_file_path,json=perfTracingFilePath,proto3" json:"perf_tracing_file_path,omitempty"`
+ // Optional overriding of default regex engine.
+ // If the value is not specified, Google RE2 will be used by default.
+ // [#extension-category: envoy.regex_engines]
+ DefaultRegexEngine *v3.TypedExtensionConfig `protobuf:"bytes,34,opt,name=default_regex_engine,json=defaultRegexEngine,proto3" json:"default_regex_engine,omitempty"`
+ // Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both
+ // fetch and load events during xDS processing.
+ // If a value is not specified, no XdsResourcesDelegate will be used.
+ // TODO(abeyad): Add public-facing documentation.
+ // [#not-implemented-hide:]
+ XdsDelegateExtension *v3.TypedExtensionConfig `protobuf:"bytes,35,opt,name=xds_delegate_extension,json=xdsDelegateExtension,proto3" json:"xds_delegate_extension,omitempty"`
+ // Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components,
+ // e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to
+ // process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used.
+ //
+ // .. note::
+ //
+ // There are no in-repo extensions currently, and the :repo:`XdsConfigTracker `
+ // interface should be implemented before using.
+ // See :repo:`xds_config_tracker_integration_test `
+ // for an example usage of the interface.
+ XdsConfigTrackerExtension *v3.TypedExtensionConfig `protobuf:"bytes,36,opt,name=xds_config_tracker_extension,json=xdsConfigTrackerExtension,proto3" json:"xds_config_tracker_extension,omitempty"`
+ // [#not-implemented-hide:]
+ // This controls the type of listener manager configured for Envoy. Currently
+ // Envoy only supports ListenerManager for this field and Envoy Mobile
+ // supports ApiListenerManager.
+ ListenerManager *v3.TypedExtensionConfig `protobuf:"bytes,37,opt,name=listener_manager,json=listenerManager,proto3" json:"listener_manager,omitempty"`
+ // Optional application log configuration.
+ ApplicationLogConfig *Bootstrap_ApplicationLogConfig `protobuf:"bytes,38,opt,name=application_log_config,json=applicationLogConfig,proto3" json:"application_log_config,omitempty"`
+ // Optional gRPC async manager config.
+ GrpcAsyncClientManagerConfig *Bootstrap_GrpcAsyncClientManagerConfig `protobuf:"bytes,40,opt,name=grpc_async_client_manager_config,json=grpcAsyncClientManagerConfig,proto3" json:"grpc_async_client_manager_config,omitempty"`
+ // Optional configuration for memory allocation manager.
+ // Memory releasing is only supported for `tcmalloc allocator `_.
+ MemoryAllocatorManager *MemoryAllocatorManager `protobuf:"bytes,41,opt,name=memory_allocator_manager,json=memoryAllocatorManager,proto3" json:"memory_allocator_manager,omitempty"`
+}
+
+func (x *Bootstrap) Reset() {
+ *x = Bootstrap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap) ProtoMessage() {}
+
+func (x *Bootstrap) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap.ProtoReflect.Descriptor instead.
+func (*Bootstrap) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Bootstrap) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetNodeContextParams() []string {
+ if x != nil {
+ return x.NodeContextParams
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStaticResources() *Bootstrap_StaticResources {
+ if x != nil {
+ return x.StaticResources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDynamicResources() *Bootstrap_DynamicResources {
+ if x != nil {
+ return x.DynamicResources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetClusterManager() *ClusterManager {
+ if x != nil {
+ return x.ClusterManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetHdsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.HdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetFlagsPath() string {
+ if x != nil {
+ return x.FlagsPath
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetStatsSinks() []*v31.StatsSink {
+ if x != nil {
+ return x.StatsSinks
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDeferredStatOptions() *Bootstrap_DeferredStatOptions {
+ if x != nil {
+ return x.DeferredStatOptions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsConfig() *v31.StatsConfig {
+ if x != nil {
+ return x.StatsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsFlushInterval() *durationpb.Duration {
+ if x != nil {
+ return x.StatsFlushInterval
+ }
+ return nil
+}
+
+func (m *Bootstrap) GetStatsFlush() isBootstrap_StatsFlush {
+ if m != nil {
+ return m.StatsFlush
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsFlushOnAdmin() bool {
+ if x, ok := x.GetStatsFlush().(*Bootstrap_StatsFlushOnAdmin); ok {
+ return x.StatsFlushOnAdmin
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetWatchdog() *Watchdog {
+ if x != nil {
+ return x.Watchdog
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetWatchdogs() *Watchdogs {
+ if x != nil {
+ return x.Watchdogs
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetTracing() *v32.Tracing {
+ if x != nil {
+ return x.Tracing
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetLayeredRuntime() *LayeredRuntime {
+ if x != nil {
+ return x.LayeredRuntime
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetAdmin() *Admin {
+ if x != nil {
+ return x.Admin
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetOverloadManager() *v33.OverloadManager {
+ if x != nil {
+ return x.OverloadManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetEnableDispatcherStats() bool {
+ if x != nil {
+ return x.EnableDispatcherStats
+ }
+ return false
+}
+
+func (x *Bootstrap) GetHeaderPrefix() string {
+ if x != nil {
+ return x.HeaderPrefix
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetStatsServerVersionOverride() *wrapperspb.UInt64Value {
+ if x != nil {
+ return x.StatsServerVersionOverride
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetUseTcpForDnsLookups() bool {
+ if x != nil {
+ return x.UseTcpForDnsLookups
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetDnsResolutionConfig() *v3.DnsResolutionConfig {
+ if x != nil {
+ return x.DnsResolutionConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetTypedDnsResolverConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.TypedDnsResolverConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetBootstrapExtensions() []*v3.TypedExtensionConfig {
+ if x != nil {
+ return x.BootstrapExtensions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetFatalActions() []*FatalAction {
+ if x != nil {
+ return x.FatalActions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetConfigSources() []*v3.ConfigSource {
+ if x != nil {
+ return x.ConfigSources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDefaultConfigSource() *v3.ConfigSource {
+ if x != nil {
+ return x.DefaultConfigSource
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDefaultSocketInterface() string {
+ if x != nil {
+ return x.DefaultSocketInterface
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetCertificateProviderInstances() map[string]*v3.TypedExtensionConfig {
+ if x != nil {
+ return x.CertificateProviderInstances
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetInlineHeaders() []*CustomInlineHeader {
+ if x != nil {
+ return x.InlineHeaders
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetPerfTracingFilePath() string {
+ if x != nil {
+ return x.PerfTracingFilePath
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetDefaultRegexEngine() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.DefaultRegexEngine
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetXdsDelegateExtension() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.XdsDelegateExtension
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetXdsConfigTrackerExtension() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.XdsConfigTrackerExtension
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetListenerManager() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.ListenerManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetApplicationLogConfig() *Bootstrap_ApplicationLogConfig {
+ if x != nil {
+ return x.ApplicationLogConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetGrpcAsyncClientManagerConfig() *Bootstrap_GrpcAsyncClientManagerConfig {
+ if x != nil {
+ return x.GrpcAsyncClientManagerConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetMemoryAllocatorManager() *MemoryAllocatorManager {
+ if x != nil {
+ return x.MemoryAllocatorManager
+ }
+ return nil
+}
+
+type isBootstrap_StatsFlush interface {
+ isBootstrap_StatsFlush()
+}
+
+type Bootstrap_StatsFlushOnAdmin struct {
+ // Flush stats to sinks only when queried for on the admin interface. If set,
+ // a flush timer is not created. Only one of “stats_flush_on_admin“ or
+ // “stats_flush_interval“ can be set.
+ StatsFlushOnAdmin bool `protobuf:"varint,29,opt,name=stats_flush_on_admin,json=statsFlushOnAdmin,proto3,oneof"`
+}
+
+func (*Bootstrap_StatsFlushOnAdmin) isBootstrap_StatsFlush() {}
+
+// Administration interface :ref:`operations documentation
+// `.
+// [#next-free-field: 7]
+type Admin struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configuration for :ref:`access logs `
+ // emitted by the administration server.
+ AccessLog []*v34.AccessLog `protobuf:"bytes,5,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"`
+ // The path to write the access log for the administration server. If no
+ // access log is desired specify ‘/dev/null’. This is only required if
+ // :ref:`address ` is set.
+ // Deprecated in favor of “access_log“ which offers more options.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+ // The cpu profiler output path for the administration server. If no profile
+ // path is specified, the default is ‘/var/log/envoy/envoy.prof’.
+ ProfilePath string `protobuf:"bytes,2,opt,name=profile_path,json=profilePath,proto3" json:"profile_path,omitempty"`
+ // The TCP address that the administration server will listen on.
+ // If not specified, Envoy will not start an administration server.
+ Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
+ // Additional socket options that may not be present in Envoy source code or
+ // precompiled binaries.
+ SocketOptions []*v3.SocketOption `protobuf:"bytes,4,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+ // Indicates whether :ref:`global_downstream_max_connections `
+ // should apply to the admin interface or not.
+ IgnoreGlobalConnLimit bool `protobuf:"varint,6,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"`
+}
+
+func (x *Admin) Reset() {
+ *x = Admin{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Admin) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Admin) ProtoMessage() {}
+
+func (x *Admin) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Admin.ProtoReflect.Descriptor instead.
+func (*Admin) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Admin) GetAccessLog() []*v34.AccessLog {
+ if x != nil {
+ return x.AccessLog
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Admin) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+func (x *Admin) GetProfilePath() string {
+ if x != nil {
+ return x.ProfilePath
+ }
+ return ""
+}
+
+func (x *Admin) GetAddress() *v3.Address {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *Admin) GetSocketOptions() []*v3.SocketOption {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+func (x *Admin) GetIgnoreGlobalConnLimit() bool {
+ if x != nil {
+ return x.IgnoreGlobalConnLimit
+ }
+ return false
+}
+
+// Cluster manager :ref:`architecture overview `.
+// [#next-free-field: 6]
+type ClusterManager struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the local cluster (i.e., the cluster that owns the Envoy running
+ // this configuration). In order to enable :ref:`zone aware routing
+ // ` this option must be set.
+ // If “local_cluster_name“ is defined then :ref:`clusters
+ // ` must be defined in the :ref:`Bootstrap
+ // static cluster resources
+ // `. This is unrelated to
+ // the :option:`--service-cluster` option which does not `affect zone aware
+ // routing `_.
+ LocalClusterName string `protobuf:"bytes,1,opt,name=local_cluster_name,json=localClusterName,proto3" json:"local_cluster_name,omitempty"`
+ // Optional global configuration for outlier detection.
+ OutlierDetection *ClusterManager_OutlierDetection `protobuf:"bytes,2,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"`
+ // Optional configuration used to bind newly established upstream connections.
+ // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.
+ UpstreamBindConfig *v3.BindConfig `protobuf:"bytes,3,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"`
+ // A management server endpoint to stream load stats to via
+ // “StreamLoadStats“. This must have :ref:`api_type
+ // ` :ref:`GRPC
+ // `.
+ LoadStatsConfig *v3.ApiConfigSource `protobuf:"bytes,4,opt,name=load_stats_config,json=loadStatsConfig,proto3" json:"load_stats_config,omitempty"`
+ // Whether the ClusterManager will create clusters on the worker threads
+ // inline during requests. This will save memory and CPU cycles in cases where
+ // there are lots of inactive clusters and > 1 worker thread.
+ EnableDeferredClusterCreation bool `protobuf:"varint,5,opt,name=enable_deferred_cluster_creation,json=enableDeferredClusterCreation,proto3" json:"enable_deferred_cluster_creation,omitempty"`
+}
+
+func (x *ClusterManager) Reset() {
+ *x = ClusterManager{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterManager) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterManager) ProtoMessage() {}
+
+func (x *ClusterManager) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterManager.ProtoReflect.Descriptor instead.
+func (*ClusterManager) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ClusterManager) GetLocalClusterName() string {
+ if x != nil {
+ return x.LocalClusterName
+ }
+ return ""
+}
+
+func (x *ClusterManager) GetOutlierDetection() *ClusterManager_OutlierDetection {
+ if x != nil {
+ return x.OutlierDetection
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetUpstreamBindConfig() *v3.BindConfig {
+ if x != nil {
+ return x.UpstreamBindConfig
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetLoadStatsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.LoadStatsConfig
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetEnableDeferredClusterCreation() bool {
+ if x != nil {
+ return x.EnableDeferredClusterCreation
+ }
+ return false
+}
+
+// Allows you to specify different watchdog configs for different subsystems.
+// This allows finer tuned policies for the watchdog. If a subsystem is omitted
+// the default values for that system will be used.
+type Watchdogs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Watchdog for the main thread.
+ MainThreadWatchdog *Watchdog `protobuf:"bytes,1,opt,name=main_thread_watchdog,json=mainThreadWatchdog,proto3" json:"main_thread_watchdog,omitempty"`
+ // Watchdog for the worker threads.
+ WorkerWatchdog *Watchdog `protobuf:"bytes,2,opt,name=worker_watchdog,json=workerWatchdog,proto3" json:"worker_watchdog,omitempty"`
+}
+
+func (x *Watchdogs) Reset() {
+ *x = Watchdogs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdogs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdogs) ProtoMessage() {}
+
+func (x *Watchdogs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdogs.ProtoReflect.Descriptor instead.
+func (*Watchdogs) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Watchdogs) GetMainThreadWatchdog() *Watchdog {
+ if x != nil {
+ return x.MainThreadWatchdog
+ }
+ return nil
+}
+
+func (x *Watchdogs) GetWorkerWatchdog() *Watchdog {
+ if x != nil {
+ return x.WorkerWatchdog
+ }
+ return nil
+}
+
+// Envoy process watchdog configuration. When configured, this monitors for
+// nonresponsive threads and kills the process after the configured thresholds.
+// See the :ref:`watchdog documentation ` for more information.
+// [#next-free-field: 8]
+type Watchdog struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Register actions that will fire on given WatchDog events.
+ // See “WatchDogAction“ for priority of events.
+ Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"`
+ // The duration after which Envoy counts a nonresponsive thread in the
+ // “watchdog_miss“ statistic. If not specified the default is 200ms.
+ MissTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"`
+ // The duration after which Envoy counts a nonresponsive thread in the
+ // “watchdog_mega_miss“ statistic. If not specified the default is
+ // 1000ms.
+ MegamissTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"`
+ // If a watched thread has been nonresponsive for this duration, assume a
+ // programming error and kill the entire Envoy process. Set to 0 to disable
+ // kill behavior. If not specified the default is 0 (disabled).
+ KillTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"`
+ // Defines the maximum jitter used to adjust the “kill_timeout“ if “kill_timeout“ is
+ // enabled. Enabling this feature would help to reduce risk of synchronized
+ // watchdog kill events across proxies due to external triggers. Set to 0 to
+ // disable. If not specified the default is 0 (disabled).
+ MaxKillTimeoutJitter *durationpb.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"`
+ // If “max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))“
+ // threads have been nonresponsive for at least this duration kill the entire
+ // Envoy process. Set to 0 to disable this behavior. If not specified the
+ // default is 0 (disabled).
+ MultikillTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"`
+ // Sets the threshold for “multikill_timeout“ in terms of the percentage of
+ // nonresponsive threads required for the “multikill_timeout“.
+ // If not specified the default is 0.
+ MultikillThreshold *v35.Percent `protobuf:"bytes,5,opt,name=multikill_threshold,json=multikillThreshold,proto3" json:"multikill_threshold,omitempty"`
+}
+
+func (x *Watchdog) Reset() {
+ *x = Watchdog{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdog) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdog) ProtoMessage() {}
+
+func (x *Watchdog) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdog.ProtoReflect.Descriptor instead.
+func (*Watchdog) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Watchdog) GetActions() []*Watchdog_WatchdogAction {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMissTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MissTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMegamissTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MegamissTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetKillTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.KillTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMaxKillTimeoutJitter() *durationpb.Duration {
+ if x != nil {
+ return x.MaxKillTimeoutJitter
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMultikillTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MultikillTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMultikillThreshold() *v35.Percent {
+ if x != nil {
+ return x.MultikillThreshold
+ }
+ return nil
+}
+
+// Fatal actions to run while crashing. Actions can be safe (meaning they are
+// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions.
+// If using an unsafe action that could get stuck or deadlock, it important to
+// have an out of band system to terminate the process.
+//
+// The interface for the extension is “Envoy::Server::Configuration::FatalAction“.
+// “FatalAction“ extensions live in the “envoy.extensions.fatal_actions“ API
+// namespace.
+type FatalAction struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Extension specific configuration for the action. It's expected to conform
+ // to the “Envoy::Server::Configuration::FatalAction“ interface.
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+}
+
+func (x *FatalAction) Reset() {
+ *x = FatalAction{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FatalAction) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FatalAction) ProtoMessage() {}
+
+func (x *FatalAction) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FatalAction.ProtoReflect.Descriptor instead.
+func (*FatalAction) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *FatalAction) GetConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+// Runtime :ref:`configuration overview ` (deprecated).
+type Runtime struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The implementation assumes that the file system tree is accessed via a
+ // symbolic link. An atomic link swap is used when a new tree should be
+ // switched to. This parameter specifies the path to the symbolic link. Envoy
+ // will watch the location for changes and reload the file system tree when
+ // they happen. If this parameter is not set, there will be no disk based
+ // runtime.
+ SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"`
+ // Specifies the subdirectory to load within the root directory. This is
+ // useful if multiple systems share the same delivery mechanism. Envoy
+ // configuration elements can be contained in a dedicated subdirectory.
+ Subdirectory string `protobuf:"bytes,2,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"`
+ // Specifies an optional subdirectory to load within the root directory. If
+ // specified and the directory exists, configuration values within this
+ // directory will override those found in the primary subdirectory. This is
+ // useful when Envoy is deployed across many different types of servers.
+ // Sometimes it is useful to have a per service cluster directory for runtime
+ // configuration. See below for exactly how the override directory is used.
+ OverrideSubdirectory string `protobuf:"bytes,3,opt,name=override_subdirectory,json=overrideSubdirectory,proto3" json:"override_subdirectory,omitempty"`
+ // Static base runtime. This will be :ref:`overridden
+ // ` by other runtime layers, e.g.
+ // disk or admin. This follows the :ref:`runtime protobuf JSON representation
+ // encoding `.
+ Base *structpb.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"`
+}
+
+func (x *Runtime) Reset() {
+ *x = Runtime{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Runtime) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Runtime) ProtoMessage() {}
+
+func (x *Runtime) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Runtime.ProtoReflect.Descriptor instead.
+func (*Runtime) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Runtime) GetSymlinkRoot() string {
+ if x != nil {
+ return x.SymlinkRoot
+ }
+ return ""
+}
+
+func (x *Runtime) GetSubdirectory() string {
+ if x != nil {
+ return x.Subdirectory
+ }
+ return ""
+}
+
+func (x *Runtime) GetOverrideSubdirectory() string {
+ if x != nil {
+ return x.OverrideSubdirectory
+ }
+ return ""
+}
+
+func (x *Runtime) GetBase() *structpb.Struct {
+ if x != nil {
+ return x.Base
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type RuntimeLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Descriptive name for the runtime layer. This is only used for the runtime
+ // :http:get:`/runtime` output.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to LayerSpecifier:
+ //
+ // *RuntimeLayer_StaticLayer
+ // *RuntimeLayer_DiskLayer_
+ // *RuntimeLayer_AdminLayer_
+ // *RuntimeLayer_RtdsLayer_
+ LayerSpecifier isRuntimeLayer_LayerSpecifier `protobuf_oneof:"layer_specifier"`
+}
+
+func (x *RuntimeLayer) Reset() {
+ *x = RuntimeLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeLayer) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *RuntimeLayer) GetLayerSpecifier() isRuntimeLayer_LayerSpecifier {
+ if m != nil {
+ return m.LayerSpecifier
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetStaticLayer() *structpb.Struct {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_StaticLayer); ok {
+ return x.StaticLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetDiskLayer() *RuntimeLayer_DiskLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_DiskLayer_); ok {
+ return x.DiskLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetAdminLayer() *RuntimeLayer_AdminLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_AdminLayer_); ok {
+ return x.AdminLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetRtdsLayer() *RuntimeLayer_RtdsLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_RtdsLayer_); ok {
+ return x.RtdsLayer
+ }
+ return nil
+}
+
+type isRuntimeLayer_LayerSpecifier interface {
+ isRuntimeLayer_LayerSpecifier()
+}
+
+type RuntimeLayer_StaticLayer struct {
+ // :ref:`Static runtime ` layer.
+ // This follows the :ref:`runtime protobuf JSON representation encoding
+ // `. Unlike static xDS resources, this static
+ // layer is overridable by later layers in the runtime virtual filesystem.
+ StaticLayer *structpb.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_DiskLayer_ struct {
+ DiskLayer *RuntimeLayer_DiskLayer `protobuf:"bytes,3,opt,name=disk_layer,json=diskLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_AdminLayer_ struct {
+ AdminLayer *RuntimeLayer_AdminLayer `protobuf:"bytes,4,opt,name=admin_layer,json=adminLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_RtdsLayer_ struct {
+ RtdsLayer *RuntimeLayer_RtdsLayer `protobuf:"bytes,5,opt,name=rtds_layer,json=rtdsLayer,proto3,oneof"`
+}
+
+func (*RuntimeLayer_StaticLayer) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_DiskLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_AdminLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_RtdsLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+// Runtime :ref:`configuration overview `.
+type LayeredRuntime struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The :ref:`layers ` of the runtime. This is ordered
+ // such that later layers in the list overlay earlier entries.
+ Layers []*RuntimeLayer `protobuf:"bytes,1,rep,name=layers,proto3" json:"layers,omitempty"`
+}
+
+func (x *LayeredRuntime) Reset() {
+ *x = LayeredRuntime{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LayeredRuntime) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LayeredRuntime) ProtoMessage() {}
+
+func (x *LayeredRuntime) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LayeredRuntime.ProtoReflect.Descriptor instead.
+func (*LayeredRuntime) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *LayeredRuntime) GetLayers() []*RuntimeLayer {
+ if x != nil {
+ return x.Layers
+ }
+ return nil
+}
+
+// Used to specify the header that needs to be registered as an inline header.
+//
+// If request or response contain multiple headers with the same name and the header
+// name is registered as an inline header. Then multiple headers will be folded
+// into one, and multiple header values will be concatenated by a suitable delimiter.
+// The delimiter is generally a comma.
+//
+// For example, if 'foo' is registered as an inline header, and the headers contains
+// the following two headers:
+//
+// .. code-block:: text
+//
+// foo: bar
+// foo: eep
+//
+// Then they will eventually be folded into:
+//
+// .. code-block:: text
+//
+// foo: bar, eep
+//
+// Inline headers provide O(1) search performance, but each inline header imposes
+// an additional memory overhead on all instances of the corresponding type of
+// HeaderMap or TrailerMap.
+type CustomInlineHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the header that is expected to be set as the inline header.
+ InlineHeaderName string `protobuf:"bytes,1,opt,name=inline_header_name,json=inlineHeaderName,proto3" json:"inline_header_name,omitempty"`
+ // The type of the header that is expected to be set as the inline header.
+ InlineHeaderType CustomInlineHeader_InlineHeaderType `protobuf:"varint,2,opt,name=inline_header_type,json=inlineHeaderType,proto3,enum=envoy.config.bootstrap.v3.CustomInlineHeader_InlineHeaderType" json:"inline_header_type,omitempty"`
+}
+
+func (x *CustomInlineHeader) Reset() {
+ *x = CustomInlineHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CustomInlineHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomInlineHeader) ProtoMessage() {}
+
+func (x *CustomInlineHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomInlineHeader.ProtoReflect.Descriptor instead.
+func (*CustomInlineHeader) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *CustomInlineHeader) GetInlineHeaderName() string {
+ if x != nil {
+ return x.InlineHeaderName
+ }
+ return ""
+}
+
+func (x *CustomInlineHeader) GetInlineHeaderType() CustomInlineHeader_InlineHeaderType {
+ if x != nil {
+ return x.InlineHeaderType
+ }
+ return CustomInlineHeader_REQUEST_HEADER
+}
+
+type MemoryAllocatorManager struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configures tcmalloc to perform background release of free memory in amount of bytes per “memory_release_interval“ interval.
+ // If equals to “0“, no memory release will occur. Defaults to “0“.
+ BytesToRelease uint64 `protobuf:"varint,1,opt,name=bytes_to_release,json=bytesToRelease,proto3" json:"bytes_to_release,omitempty"`
+ // Interval in milliseconds for memory releasing. If specified, during every
+ // interval Envoy will try to release “bytes_to_release“ of free memory back to operating system for reuse.
+ // Defaults to 1000 milliseconds.
+ MemoryReleaseInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=memory_release_interval,json=memoryReleaseInterval,proto3" json:"memory_release_interval,omitempty"`
+}
+
+func (x *MemoryAllocatorManager) Reset() {
+ *x = MemoryAllocatorManager{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryAllocatorManager) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryAllocatorManager) ProtoMessage() {}
+
+func (x *MemoryAllocatorManager) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryAllocatorManager.ProtoReflect.Descriptor instead.
+func (*MemoryAllocatorManager) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *MemoryAllocatorManager) GetBytesToRelease() uint64 {
+ if x != nil {
+ return x.BytesToRelease
+ }
+ return 0
+}
+
+func (x *MemoryAllocatorManager) GetMemoryReleaseInterval() *durationpb.Duration {
+ if x != nil {
+ return x.MemoryReleaseInterval
+ }
+ return nil
+}
+
+type Bootstrap_StaticResources struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Static :ref:`Listeners `. These listeners are
+ // available regardless of LDS configuration.
+ Listeners []*v36.Listener `protobuf:"bytes,1,rep,name=listeners,proto3" json:"listeners,omitempty"`
+ // If a network based configuration source is specified for :ref:`cds_config
+ // `, it's necessary
+ // to have some initial cluster definitions available to allow Envoy to know
+ // how to speak to the management server. These cluster definitions may not
+ // use :ref:`EDS ` (i.e. they should be static
+ // IP or DNS-based).
+ Clusters []*v37.Cluster `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"`
+ // These static secrets can be used by :ref:`SdsSecretConfig
+ // `
+ Secrets []*v38.Secret `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty"`
+}
+
+func (x *Bootstrap_StaticResources) Reset() {
+ *x = Bootstrap_StaticResources{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_StaticResources) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_StaticResources) ProtoMessage() {}
+
+func (x *Bootstrap_StaticResources) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_StaticResources.ProtoReflect.Descriptor instead.
+func (*Bootstrap_StaticResources) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Bootstrap_StaticResources) GetListeners() []*v36.Listener {
+ if x != nil {
+ return x.Listeners
+ }
+ return nil
+}
+
+func (x *Bootstrap_StaticResources) GetClusters() []*v37.Cluster {
+ if x != nil {
+ return x.Clusters
+ }
+ return nil
+}
+
+func (x *Bootstrap_StaticResources) GetSecrets() []*v38.Secret {
+ if x != nil {
+ return x.Secrets
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type Bootstrap_DynamicResources struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // All :ref:`Listeners ` are provided by a single
+ // :ref:`LDS ` configuration source.
+ LdsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=lds_config,json=ldsConfig,proto3" json:"lds_config,omitempty"`
+ // xdstp:// resource locator for listener collection.
+ // [#not-implemented-hide:]
+ LdsResourcesLocator string `protobuf:"bytes,5,opt,name=lds_resources_locator,json=ldsResourcesLocator,proto3" json:"lds_resources_locator,omitempty"`
+ // All post-bootstrap :ref:`Cluster ` definitions are
+ // provided by a single :ref:`CDS `
+ // configuration source.
+ CdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=cds_config,json=cdsConfig,proto3" json:"cds_config,omitempty"`
+ // xdstp:// resource locator for cluster collection.
+ // [#not-implemented-hide:]
+ CdsResourcesLocator string `protobuf:"bytes,6,opt,name=cds_resources_locator,json=cdsResourcesLocator,proto3" json:"cds_resources_locator,omitempty"`
+ // A single :ref:`ADS ` source may be optionally
+ // specified. This must have :ref:`api_type
+ // ` :ref:`GRPC
+ // `. Only
+ // :ref:`ConfigSources ` that have
+ // the :ref:`ads ` field set will be
+ // streamed on the ADS channel.
+ AdsConfig *v3.ApiConfigSource `protobuf:"bytes,3,opt,name=ads_config,json=adsConfig,proto3" json:"ads_config,omitempty"`
+}
+
+func (x *Bootstrap_DynamicResources) Reset() {
+ *x = Bootstrap_DynamicResources{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_DynamicResources) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_DynamicResources) ProtoMessage() {}
+
+func (x *Bootstrap_DynamicResources) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_DynamicResources.ProtoReflect.Descriptor instead.
+func (*Bootstrap_DynamicResources) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Bootstrap_DynamicResources) GetLdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.LdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap_DynamicResources) GetLdsResourcesLocator() string {
+ if x != nil {
+ return x.LdsResourcesLocator
+ }
+ return ""
+}
+
+func (x *Bootstrap_DynamicResources) GetCdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.CdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap_DynamicResources) GetCdsResourcesLocator() string {
+ if x != nil {
+ return x.CdsResourcesLocator
+ }
+ return ""
+}
+
+func (x *Bootstrap_DynamicResources) GetAdsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.AdsConfig
+ }
+ return nil
+}
+
+type Bootstrap_ApplicationLogConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional field to set the application logs format. If this field is set, it will override
+ // the default log format. Setting both this field and :option:`--log-format` command line
+ // option is not allowed, and will cause a bootstrap error.
+ LogFormat *Bootstrap_ApplicationLogConfig_LogFormat `protobuf:"bytes,1,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"`
+}
+
+func (x *Bootstrap_ApplicationLogConfig) Reset() {
+ *x = Bootstrap_ApplicationLogConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_ApplicationLogConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_ApplicationLogConfig) ProtoMessage() {}
+
+func (x *Bootstrap_ApplicationLogConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_ApplicationLogConfig.ProtoReflect.Descriptor instead.
+func (*Bootstrap_ApplicationLogConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Bootstrap_ApplicationLogConfig) GetLogFormat() *Bootstrap_ApplicationLogConfig_LogFormat {
+ if x != nil {
+ return x.LogFormat
+ }
+ return nil
+}
+
+type Bootstrap_DeferredStatOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below).
+ // This will save memory and CPU cycles when creating the objects that own these stats, if those
+ // stats are never referenced throughout the lifetime of the process. However, it will incur additional
+ // memory overhead for these objects, and a small increase of CPU usage when a at least one of the stats
+ // is updated for the first time.
+ // Groups of stats that will be lazily initialized:
+ // - Cluster traffic stats: a subgroup of the :ref:`cluster statistics `
+ // that are used when requests are routed to the cluster.
+ EnableDeferredCreationStats bool `protobuf:"varint,1,opt,name=enable_deferred_creation_stats,json=enableDeferredCreationStats,proto3" json:"enable_deferred_creation_stats,omitempty"`
+}
+
+func (x *Bootstrap_DeferredStatOptions) Reset() {
+ *x = Bootstrap_DeferredStatOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_DeferredStatOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_DeferredStatOptions) ProtoMessage() {}
+
+func (x *Bootstrap_DeferredStatOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_DeferredStatOptions.ProtoReflect.Descriptor instead.
+func (*Bootstrap_DeferredStatOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *Bootstrap_DeferredStatOptions) GetEnableDeferredCreationStats() bool {
+ if x != nil {
+ return x.EnableDeferredCreationStats
+ }
+ return false
+}
+
+type Bootstrap_GrpcAsyncClientManagerConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional field to set the expiration time for the cached gRPC client object.
+ // The minimal value is 5s and the default is 50s.
+ MaxCachedEntryIdleDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"`
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) Reset() {
+ *x = Bootstrap_GrpcAsyncClientManagerConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_GrpcAsyncClientManagerConfig) ProtoMessage() {}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_GrpcAsyncClientManagerConfig.ProtoReflect.Descriptor instead.
+func (*Bootstrap_GrpcAsyncClientManagerConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *durationpb.Duration {
+ if x != nil {
+ return x.MaxCachedEntryIdleDuration
+ }
+ return nil
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to LogFormat:
+ //
+ // *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat
+ // *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat
+ LogFormat isBootstrap_ApplicationLogConfig_LogFormat_LogFormat `protobuf_oneof:"log_format"`
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) Reset() {
+ *x = Bootstrap_ApplicationLogConfig_LogFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat) ProtoMessage() {}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_ApplicationLogConfig_LogFormat.ProtoReflect.Descriptor instead.
+func (*Bootstrap_ApplicationLogConfig_LogFormat) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) GetLogFormat() isBootstrap_ApplicationLogConfig_LogFormat_LogFormat {
+ if m != nil {
+ return m.LogFormat
+ }
+ return nil
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *structpb.Struct {
+ if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok {
+ return x.JsonFormat
+ }
+ return nil
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetTextFormat() string {
+ if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok {
+ return x.TextFormat
+ }
+ return ""
+}
+
+type isBootstrap_ApplicationLogConfig_LogFormat_LogFormat interface {
+ isBootstrap_ApplicationLogConfig_LogFormat_LogFormat()
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat struct {
+ // Flush application logs in JSON format. The configured JSON struct can
+ // support all the format flags specified in the :option:`--log-format`
+ // command line options section, except for the “%v“ and “%_“ flags.
+ JsonFormat *structpb.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"`
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat_TextFormat struct {
+ // Flush application log in a format defined by a string. The text format
+ // can support all the format flags specified in the :option:`--log-format`
+ // command line option section.
+ TextFormat string `protobuf:"bytes,2,opt,name=text_format,json=textFormat,proto3,oneof"`
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() {
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() {
+}
+
+type ClusterManager_OutlierDetection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the path to the outlier event log.
+ EventLogPath string `protobuf:"bytes,1,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"`
+ // [#not-implemented-hide:]
+ // The gRPC service for the outlier detection event service.
+ // If empty, outlier detection events won't be sent to a remote endpoint.
+ EventService *v3.EventServiceConfig `protobuf:"bytes,2,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"`
+}
+
+func (x *ClusterManager_OutlierDetection) Reset() {
+ *x = ClusterManager_OutlierDetection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterManager_OutlierDetection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterManager_OutlierDetection) ProtoMessage() {}
+
+func (x *ClusterManager_OutlierDetection) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterManager_OutlierDetection.ProtoReflect.Descriptor instead.
+func (*ClusterManager_OutlierDetection) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *ClusterManager_OutlierDetection) GetEventLogPath() string {
+ if x != nil {
+ return x.EventLogPath
+ }
+ return ""
+}
+
+func (x *ClusterManager_OutlierDetection) GetEventService() *v3.EventServiceConfig {
+ if x != nil {
+ return x.EventService
+ }
+ return nil
+}
+
+type Watchdog_WatchdogAction struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Extension specific configuration for the action.
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+ Event Watchdog_WatchdogAction_WatchdogEvent `protobuf:"varint,2,opt,name=event,proto3,enum=envoy.config.bootstrap.v3.Watchdog_WatchdogAction_WatchdogEvent" json:"event,omitempty"`
+}
+
+func (x *Watchdog_WatchdogAction) Reset() {
+ *x = Watchdog_WatchdogAction{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdog_WatchdogAction) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdog_WatchdogAction) ProtoMessage() {}
+
+func (x *Watchdog_WatchdogAction) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdog_WatchdogAction.ProtoReflect.Descriptor instead.
+func (*Watchdog_WatchdogAction) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *Watchdog_WatchdogAction) GetConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *Watchdog_WatchdogAction) GetEvent() Watchdog_WatchdogAction_WatchdogEvent {
+ if x != nil {
+ return x.Event
+ }
+ return Watchdog_WatchdogAction_UNKNOWN
+}
+
+// :ref:`Disk runtime ` layer.
+type RuntimeLayer_DiskLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The implementation assumes that the file system tree is accessed via a
+ // symbolic link. An atomic link swap is used when a new tree should be
+ // switched to. This parameter specifies the path to the symbolic link.
+ // Envoy will watch the location for changes and reload the file system tree
+ // when they happen. See documentation on runtime :ref:`atomicity
+ // ` for further details on how reloads are
+ // treated.
+ SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"`
+ // Specifies the subdirectory to load within the root directory. This is
+ // useful if multiple systems share the same delivery mechanism. Envoy
+ // configuration elements can be contained in a dedicated subdirectory.
+ Subdirectory string `protobuf:"bytes,3,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"`
+ // :ref:`Append ` the
+ // service cluster to the path under symlink root.
+ AppendServiceCluster bool `protobuf:"varint,2,opt,name=append_service_cluster,json=appendServiceCluster,proto3" json:"append_service_cluster,omitempty"`
+}
+
+func (x *RuntimeLayer_DiskLayer) Reset() {
+ *x = RuntimeLayer_DiskLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_DiskLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_DiskLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_DiskLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_DiskLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_DiskLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *RuntimeLayer_DiskLayer) GetSymlinkRoot() string {
+ if x != nil {
+ return x.SymlinkRoot
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_DiskLayer) GetSubdirectory() string {
+ if x != nil {
+ return x.Subdirectory
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_DiskLayer) GetAppendServiceCluster() bool {
+ if x != nil {
+ return x.AppendServiceCluster
+ }
+ return false
+}
+
+// :ref:`Admin console runtime ` layer.
+type RuntimeLayer_AdminLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RuntimeLayer_AdminLayer) Reset() {
+ *x = RuntimeLayer_AdminLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_AdminLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_AdminLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_AdminLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_AdminLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_AdminLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 1}
+}
+
+// :ref:`Runtime Discovery Service (RTDS) ` layer.
+type RuntimeLayer_RtdsLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Resource to subscribe to at “rtds_config“ for the RTDS layer.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // RTDS configuration source.
+ RtdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=rtds_config,json=rtdsConfig,proto3" json:"rtds_config,omitempty"`
+}
+
+func (x *RuntimeLayer_RtdsLayer) Reset() {
+ *x = RuntimeLayer_RtdsLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_RtdsLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_RtdsLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_RtdsLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_RtdsLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_RtdsLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 2}
+}
+
+func (x *RuntimeLayer_RtdsLayer) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_RtdsLayer) GetRtdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.RtdsConfig
+ }
+ return nil
+}
+
+var File_envoy_config_bootstrap_v3_bootstrap_proto protoreflect.FileDescriptor
+
+var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76,
+ 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70,
+ 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0x99, 0x24, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12,
+ 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12,
+ 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6e, 0x6f,
+ 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+ 0x5f, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52,
+ 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x12, 0x62, 0x0a, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73,
+ 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x73, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x68, 0x64, 0x73, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x68, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a,
+ 0x0b, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e,
+ 0x6b, 0x73, 0x12, 0x6c, 0x0a, 0x15, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x27, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64,
+ 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x65, 0x66,
+ 0x65, 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x73, 0x74,
+ 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x24, 0xfa, 0x42, 0x0e, 0xaa, 0x01, 0x0b, 0x1a, 0x03, 0x08, 0xac, 0x02, 0x32,
+ 0x04, 0x10, 0xc0, 0x84, 0x3d, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x12, 0x0b, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46,
+ 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x14,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a,
+ 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73,
+ 0x68, 0x4f, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x08, 0x77, 0x61, 0x74, 0x63,
+ 0x68, 0x64, 0x6f, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x42,
+ 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x08, 0x77, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64,
+ 0x6f, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x52,
+ 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x07, 0x74, 0x72,
+ 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86,
+ 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e,
+ 0x67, 0x12, 0x52, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x75, 0x6e,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x5f, 0x0a,
+ 0x10, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x42, 0x09, 0x8a, 0x93, 0xb7, 0x2a, 0x04, 0x08, 0x01, 0x10, 0x01, 0x52, 0x0f, 0x6f,
+ 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x36,
+ 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5f, 0x0a, 0x1d, 0x73,
+ 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x1a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x41, 0x0a, 0x17,
+ 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f,
+ 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92,
+ 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54,
+ 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12,
+ 0x6a, 0x0a, 0x15, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04,
+ 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65,
+ 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x14, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0c, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49,
+ 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x15, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x13, 0x64, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x18, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6f, 0x63, 0x6b,
+ 0x65, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x1e,
+ 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x19,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1c, 0x63, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65,
+ 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x69, 0x6e,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x20, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x52, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x72, 0x66, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67,
+ 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x13, 0x70, 0x65, 0x72, 0x66, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x6c,
+ 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x22, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67,
+ 0x69, 0x6e, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x78, 0x64, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x23, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x14, 0x78, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6b, 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x16, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x20, 0x67,
+ 0x72, 0x70, 0x63, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x67, 0x72, 0x70, 0x63, 0x41, 0x73,
+ 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x18, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
+ 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x16, 0x6d, 0x65, 0x6d,
+ 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x72, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c,
+ 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a,
+ 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
+ 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61,
+ 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09,
+ 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36,
+ 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0xf9, 0x01, 0x0a,
+ 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x09,
+ 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, 0x4c, 0x6f, 0x67,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
+ 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, 0x65, 0x66, 0x65,
+ 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72,
+ 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44,
+ 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79,
+ 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x64,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04,
+ 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50,
+ 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04,
+ 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03,
+ 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f,
+ 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30,
+ 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74,
+ 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a,
+ 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69,
+ 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, 0x0e, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75,
+ 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
+ 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x20, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65,
+ 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44,
+ 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d,
+ 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f,
+ 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a,
+ 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55,
+ 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f,
+ 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74,
+ 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f,
+ 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68,
+ 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68,
+ 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c,
+ 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10,
+ 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11,
+ 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c,
+ 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69,
+ 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02,
+ 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e,
+ 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f,
+ 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64,
+ 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a,
+ 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d,
+ 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73,
+ 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f,
+ 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69,
+ 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53,
+ 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62,
+ 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a,
+ 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61,
+ 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52,
+ 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b,
+ 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79,
+ 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72,
+ 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64,
+ 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d,
+ 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72,
+ 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01,
+ 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73,
+ 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22,
+ 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a,
+ 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65,
+ 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a,
+ 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41,
+ 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74,
+ 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72,
+ 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e,
+ 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28,
+ 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65,
+ 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61,
+ 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12,
+ 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10,
+ 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70,
+ 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54,
+ 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51,
+ 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13,
+ 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45,
+ 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f,
+ 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x4d, 0x65,
+ 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e,
+ 0x61, 0x67, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f,
+ 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e,
+ 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x51,
+ 0x0a, 0x17, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x65, 0x6d, 0x6f,
+ 0x72, 0x79, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67,
+ 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce sync.Once
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc
+)
+
+func file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP() []byte {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce.Do(func() {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData)
+ })
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData
+}
+
+var file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{
+ (Watchdog_WatchdogAction_WatchdogEvent)(0), // 0: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent
+ (CustomInlineHeader_InlineHeaderType)(0), // 1: envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType
+ (*Bootstrap)(nil), // 2: envoy.config.bootstrap.v3.Bootstrap
+ (*Admin)(nil), // 3: envoy.config.bootstrap.v3.Admin
+ (*ClusterManager)(nil), // 4: envoy.config.bootstrap.v3.ClusterManager
+ (*Watchdogs)(nil), // 5: envoy.config.bootstrap.v3.Watchdogs
+ (*Watchdog)(nil), // 6: envoy.config.bootstrap.v3.Watchdog
+ (*FatalAction)(nil), // 7: envoy.config.bootstrap.v3.FatalAction
+ (*Runtime)(nil), // 8: envoy.config.bootstrap.v3.Runtime
+ (*RuntimeLayer)(nil), // 9: envoy.config.bootstrap.v3.RuntimeLayer
+ (*LayeredRuntime)(nil), // 10: envoy.config.bootstrap.v3.LayeredRuntime
+ (*CustomInlineHeader)(nil), // 11: envoy.config.bootstrap.v3.CustomInlineHeader
+ (*MemoryAllocatorManager)(nil), // 12: envoy.config.bootstrap.v3.MemoryAllocatorManager
+ (*Bootstrap_StaticResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.StaticResources
+ (*Bootstrap_DynamicResources)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.DynamicResources
+ (*Bootstrap_ApplicationLogConfig)(nil), // 15: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig
+ (*Bootstrap_DeferredStatOptions)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions
+ (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 17: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig
+ nil, // 18: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry
+ (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 19: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat
+ (*ClusterManager_OutlierDetection)(nil), // 20: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection
+ (*Watchdog_WatchdogAction)(nil), // 21: envoy.config.bootstrap.v3.Watchdog.WatchdogAction
+ (*RuntimeLayer_DiskLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer
+ (*RuntimeLayer_AdminLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer
+ (*RuntimeLayer_RtdsLayer)(nil), // 24: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer
+ (*v3.Node)(nil), // 25: envoy.config.core.v3.Node
+ (*v3.ApiConfigSource)(nil), // 26: envoy.config.core.v3.ApiConfigSource
+ (*v31.StatsSink)(nil), // 27: envoy.config.metrics.v3.StatsSink
+ (*v31.StatsConfig)(nil), // 28: envoy.config.metrics.v3.StatsConfig
+ (*durationpb.Duration)(nil), // 29: google.protobuf.Duration
+ (*v32.Tracing)(nil), // 30: envoy.config.trace.v3.Tracing
+ (*v33.OverloadManager)(nil), // 31: envoy.config.overload.v3.OverloadManager
+ (*wrapperspb.UInt64Value)(nil), // 32: google.protobuf.UInt64Value
+ (*v3.DnsResolutionConfig)(nil), // 33: envoy.config.core.v3.DnsResolutionConfig
+ (*v3.TypedExtensionConfig)(nil), // 34: envoy.config.core.v3.TypedExtensionConfig
+ (*v3.ConfigSource)(nil), // 35: envoy.config.core.v3.ConfigSource
+ (*v34.AccessLog)(nil), // 36: envoy.config.accesslog.v3.AccessLog
+ (*v3.Address)(nil), // 37: envoy.config.core.v3.Address
+ (*v3.SocketOption)(nil), // 38: envoy.config.core.v3.SocketOption
+ (*v3.BindConfig)(nil), // 39: envoy.config.core.v3.BindConfig
+ (*v35.Percent)(nil), // 40: envoy.type.v3.Percent
+ (*structpb.Struct)(nil), // 41: google.protobuf.Struct
+ (*v36.Listener)(nil), // 42: envoy.config.listener.v3.Listener
+ (*v37.Cluster)(nil), // 43: envoy.config.cluster.v3.Cluster
+ (*v38.Secret)(nil), // 44: envoy.extensions.transport_sockets.tls.v3.Secret
+ (*v3.EventServiceConfig)(nil), // 45: envoy.config.core.v3.EventServiceConfig
+}
+var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{
+ 25, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node
+ 13, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources
+ 14, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources
+ 4, // 3: envoy.config.bootstrap.v3.Bootstrap.cluster_manager:type_name -> envoy.config.bootstrap.v3.ClusterManager
+ 26, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 27, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink
+ 16, // 6: envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions
+ 28, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig
+ 29, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration
+ 6, // 9: envoy.config.bootstrap.v3.Bootstrap.watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 5, // 10: envoy.config.bootstrap.v3.Bootstrap.watchdogs:type_name -> envoy.config.bootstrap.v3.Watchdogs
+ 30, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing
+ 10, // 12: envoy.config.bootstrap.v3.Bootstrap.layered_runtime:type_name -> envoy.config.bootstrap.v3.LayeredRuntime
+ 3, // 13: envoy.config.bootstrap.v3.Bootstrap.admin:type_name -> envoy.config.bootstrap.v3.Admin
+ 31, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager
+ 32, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value
+ 33, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig
+ 34, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 7, // 19: envoy.config.bootstrap.v3.Bootstrap.fatal_actions:type_name -> envoy.config.bootstrap.v3.FatalAction
+ 35, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource
+ 35, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource
+ 18, // 22: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry
+ 11, // 23: envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader
+ 34, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 15, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig
+ 17, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig
+ 12, // 30: envoy.config.bootstrap.v3.Bootstrap.memory_allocator_manager:type_name -> envoy.config.bootstrap.v3.MemoryAllocatorManager
+ 36, // 31: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog
+ 37, // 32: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address
+ 38, // 33: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption
+ 20, // 34: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection
+ 39, // 35: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig
+ 26, // 36: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 6, // 37: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 6, // 38: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 21, // 39: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction
+ 29, // 40: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration
+ 29, // 41: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration
+ 29, // 42: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration
+ 29, // 43: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration
+ 29, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration
+ 40, // 45: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent
+ 34, // 46: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 41, // 47: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct
+ 41, // 48: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct
+ 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer
+ 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer
+ 24, // 51: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer
+ 9, // 52: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer
+ 1, // 53: envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType
+ 29, // 54: envoy.config.bootstrap.v3.MemoryAllocatorManager.memory_release_interval:type_name -> google.protobuf.Duration
+ 42, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener
+ 43, // 56: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster
+ 44, // 57: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret
+ 35, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 35, // 59: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 26, // 60: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 19, // 61: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat
+ 29, // 62: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration
+ 34, // 63: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 41, // 64: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct
+ 45, // 65: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig
+ 34, // 66: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 0, // 67: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent
+ 35, // 68: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 69, // [69:69] is the sub-list for method output_type
+ 69, // [69:69] is the sub-list for method input_type
+ 69, // [69:69] is the sub-list for extension type_name
+ 69, // [69:69] is the sub-list for extension extendee
+ 0, // [0:69] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() }
+func file_envoy_config_bootstrap_v3_bootstrap_proto_init() {
+ if File_envoy_config_bootstrap_v3_bootstrap_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Admin); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterManager); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdogs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdog); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FatalAction); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Runtime); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LayeredRuntime); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CustomInlineHeader); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryAllocatorManager); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_StaticResources); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_DynamicResources); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_ApplicationLogConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_DeferredStatOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_GrpcAsyncClientManagerConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_ApplicationLogConfig_LogFormat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterManager_OutlierDetection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdog_WatchdogAction); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_DiskLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_AdminLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_RtdsLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Bootstrap_StatsFlushOnAdmin)(nil),
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].OneofWrappers = []interface{}{
+ (*RuntimeLayer_StaticLayer)(nil),
+ (*RuntimeLayer_DiskLayer_)(nil),
+ (*RuntimeLayer_AdminLayer_)(nil),
+ (*RuntimeLayer_RtdsLayer_)(nil),
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].OneofWrappers = []interface{}{
+ (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat)(nil),
+ (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes,
+ DependencyIndexes: file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs,
+ EnumInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes,
+ MessageInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes,
+ }.Build()
+ File_envoy_config_bootstrap_v3_bootstrap_proto = out.File
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = nil
+ file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = nil
+ file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go
new file mode 100644
index 000000000..55724c095
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go
@@ -0,0 +1,4501 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Bootstrap with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in BootstrapMultiError, or nil
+// if none found.
+func (m *Bootstrap) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetNode()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetStaticResources()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStaticResources()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDynamicResources()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDynamicResources()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetClusterManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetClusterManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetHdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for FlagsPath
+
+ for idx, item := range m.GetStatsSinks() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDeferredStatOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDeferredStatOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetStatsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetStatsFlushInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = BootstrapValidationError{
+ field: "StatsFlushInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ lt := time.Duration(300*time.Second + 0*time.Nanosecond)
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte || dur >= lt {
+ err := BootstrapValidationError{
+ field: "StatsFlushInterval",
+ reason: "value must be inside range [1ms, 5m0s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWatchdogs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWatchdogs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetTracing()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLayeredRuntime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLayeredRuntime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetAdmin()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdmin()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetOverloadManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOverloadManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableDispatcherStats
+
+ // no validation rules for HeaderPrefix
+
+ if all {
+ switch v := interface{}(m.GetStatsServerVersionOverride()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatsServerVersionOverride()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for UseTcpForDnsLookups
+
+ if all {
+ switch v := interface{}(m.GetDnsResolutionConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetBootstrapExtensions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetFatalActions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetConfigSources() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultConfigSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for DefaultSocketInterface
+
+ {
+ sorted_keys := make([]string, len(m.GetCertificateProviderInstances()))
+ i := 0
+ for key := range m.GetCertificateProviderInstances() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetCertificateProviderInstances()[key]
+ _ = val
+
+ // no validation rules for CertificateProviderInstances[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ for idx, item := range m.GetInlineHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for PerfTracingFilePath
+
+ if all {
+ switch v := interface{}(m.GetDefaultRegexEngine()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultRegexEngine()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetXdsDelegateExtension()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetXdsDelegateExtension()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetXdsConfigTrackerExtension()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetXdsConfigTrackerExtension()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetListenerManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListenerManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetApplicationLogConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetApplicationLogConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetGrpcAsyncClientManagerConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcAsyncClientManagerConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMemoryAllocatorManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMemoryAllocatorManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.StatsFlush.(type) {
+ case *Bootstrap_StatsFlushOnAdmin:
+ if v == nil {
+ err := BootstrapValidationError{
+ field: "StatsFlush",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetStatsFlushOnAdmin() != true {
+ err := BootstrapValidationError{
+ field: "StatsFlushOnAdmin",
+ reason: "value must equal true",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return BootstrapMultiError(errors)
+ }
+
+ return nil
+}
+
+// BootstrapMultiError is an error wrapping multiple validation errors returned
+// by Bootstrap.ValidateAll() if the designated constraints aren't met.
+type BootstrapMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BootstrapMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BootstrapMultiError) AllErrors() []error { return m }
+
+// BootstrapValidationError is the validation error returned by
+// Bootstrap.Validate if the designated constraints aren't met.
+type BootstrapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BootstrapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BootstrapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BootstrapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BootstrapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BootstrapValidationError) ErrorName() string { return "BootstrapValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BootstrapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BootstrapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BootstrapValidationError{}
+
+// Validate checks the field values on Admin with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Admin) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Admin with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in AdminMultiError, or nil if none found.
+func (m *Admin) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Admin) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetAccessLog() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for AccessLogPath
+
+ // no validation rules for ProfilePath
+
+ if all {
+ switch v := interface{}(m.GetAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetSocketOptions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for IgnoreGlobalConnLimit
+
+ if len(errors) > 0 {
+ return AdminMultiError(errors)
+ }
+
+ return nil
+}
+
+// AdminMultiError is an error wrapping multiple validation errors returned by
+// Admin.ValidateAll() if the designated constraints aren't met.
+type AdminMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AdminMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AdminMultiError) AllErrors() []error { return m }
+
+// AdminValidationError is the validation error returned by Admin.Validate if
+// the designated constraints aren't met.
+type AdminValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AdminValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AdminValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AdminValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AdminValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AdminValidationError) ErrorName() string { return "AdminValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AdminValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAdmin.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AdminValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AdminValidationError{}
+
+// Validate checks the field values on ClusterManager with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ClusterManager) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterManager with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClusterManagerMultiError,
+// or nil if none found.
+func (m *ClusterManager) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterManager) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for LocalClusterName
+
+ if all {
+ switch v := interface{}(m.GetOutlierDetection()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUpstreamBindConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLoadStatsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLoadStatsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableDeferredClusterCreation
+
+ if len(errors) > 0 {
+ return ClusterManagerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterManagerMultiError is an error wrapping multiple validation errors
+// returned by ClusterManager.ValidateAll() if the designated constraints
+// aren't met.
+type ClusterManagerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterManagerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterManagerMultiError) AllErrors() []error { return m }
+
+// ClusterManagerValidationError is the validation error returned by
+// ClusterManager.Validate if the designated constraints aren't met.
+type ClusterManagerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterManagerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterManagerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterManagerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterManagerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterManagerValidationError) ErrorName() string { return "ClusterManagerValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClusterManagerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterManager.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterManagerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterManagerValidationError{}
+
+// Validate checks the field values on Watchdogs with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Watchdogs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdogs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in WatchdogsMultiError, or nil
+// if none found.
+func (m *Watchdogs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdogs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetMainThreadWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMainThreadWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWorkerWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWorkerWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return WatchdogsMultiError(errors)
+ }
+
+ return nil
+}
+
+// WatchdogsMultiError is an error wrapping multiple validation errors returned
+// by Watchdogs.ValidateAll() if the designated constraints aren't met.
+type WatchdogsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WatchdogsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WatchdogsMultiError) AllErrors() []error { return m }
+
+// WatchdogsValidationError is the validation error returned by
+// Watchdogs.Validate if the designated constraints aren't met.
+type WatchdogsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchdogsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchdogsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchdogsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchdogsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchdogsValidationError) ErrorName() string { return "WatchdogsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchdogsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdogs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchdogsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchdogsValidationError{}
+
+// Validate checks the field values on Watchdog with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Watchdog) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdog with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in WatchdogMultiError, or nil
+// if none found.
+func (m *Watchdog) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdog) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetActions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetMissTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMissTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMegamissTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMegamissTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetKillTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKillTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetMaxKillTimeoutJitter(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = WatchdogValidationError{
+ field: "MaxKillTimeoutJitter",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ err := WatchdogValidationError{
+ field: "MaxKillTimeoutJitter",
+ reason: "value must be greater than or equal to 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMultikillTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMultikillTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMultikillThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMultikillThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return WatchdogMultiError(errors)
+ }
+
+ return nil
+}
+
+// WatchdogMultiError is an error wrapping multiple validation errors returned
+// by Watchdog.ValidateAll() if the designated constraints aren't met.
+type WatchdogMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WatchdogMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WatchdogMultiError) AllErrors() []error { return m }
+
+// WatchdogValidationError is the validation error returned by
+// Watchdog.Validate if the designated constraints aren't met.
+type WatchdogValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchdogValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchdogValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchdogValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchdogValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchdogValidationError) ErrorName() string { return "WatchdogValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchdogValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdog.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchdogValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchdogValidationError{}
+
+// Validate checks the field values on FatalAction with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *FatalAction) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FatalAction with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in FatalActionMultiError, or
+// nil if none found.
+func (m *FatalAction) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FatalAction) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return FatalActionMultiError(errors)
+ }
+
+ return nil
+}
+
+// FatalActionMultiError is an error wrapping multiple validation errors
+// returned by FatalAction.ValidateAll() if the designated constraints aren't met.
+type FatalActionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FatalActionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FatalActionMultiError) AllErrors() []error { return m }
+
+// FatalActionValidationError is the validation error returned by
+// FatalAction.Validate if the designated constraints aren't met.
+type FatalActionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FatalActionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FatalActionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FatalActionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FatalActionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FatalActionValidationError) ErrorName() string { return "FatalActionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e FatalActionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFatalAction.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FatalActionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FatalActionValidationError{}
+
+// Validate checks the field values on Runtime with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Runtime) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Runtime with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in RuntimeMultiError, or nil if none found.
+func (m *Runtime) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Runtime) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for SymlinkRoot
+
+ // no validation rules for Subdirectory
+
+ // no validation rules for OverrideSubdirectory
+
+ if all {
+ switch v := interface{}(m.GetBase()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBase()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RuntimeMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeMultiError is an error wrapping multiple validation errors returned
+// by Runtime.ValidateAll() if the designated constraints aren't met.
+type RuntimeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeMultiError) AllErrors() []error { return m }
+
+// RuntimeValidationError is the validation error returned by Runtime.Validate
+// if the designated constraints aren't met.
+type RuntimeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeValidationError) ErrorName() string { return "RuntimeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntime.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeValidationError{}
+
+// Validate checks the field values on RuntimeLayer with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RuntimeLayerMultiError, or
+// nil if none found.
+func (m *RuntimeLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := RuntimeLayerValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofLayerSpecifierPresent := false
+ switch v := m.LayerSpecifier.(type) {
+ case *RuntimeLayer_StaticLayer:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStaticLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStaticLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_DiskLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetDiskLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDiskLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_AdminLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAdminLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdminLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_RtdsLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetRtdsLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRtdsLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofLayerSpecifierPresent {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return RuntimeLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayerMultiError is an error wrapping multiple validation errors
+// returned by RuntimeLayer.ValidateAll() if the designated constraints aren't met.
+type RuntimeLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayerValidationError is the validation error returned by
+// RuntimeLayer.Validate if the designated constraints aren't met.
+type RuntimeLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayerValidationError) ErrorName() string { return "RuntimeLayerValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayerValidationError{}
+
+// Validate checks the field values on LayeredRuntime with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *LayeredRuntime) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on LayeredRuntime with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LayeredRuntimeMultiError,
+// or nil if none found.
+func (m *LayeredRuntime) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *LayeredRuntime) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetLayers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return LayeredRuntimeMultiError(errors)
+ }
+
+ return nil
+}
+
+// LayeredRuntimeMultiError is an error wrapping multiple validation errors
+// returned by LayeredRuntime.ValidateAll() if the designated constraints
+// aren't met.
+type LayeredRuntimeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LayeredRuntimeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LayeredRuntimeMultiError) AllErrors() []error { return m }
+
+// LayeredRuntimeValidationError is the validation error returned by
+// LayeredRuntime.Validate if the designated constraints aren't met.
+type LayeredRuntimeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LayeredRuntimeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LayeredRuntimeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LayeredRuntimeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LayeredRuntimeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LayeredRuntimeValidationError) ErrorName() string { return "LayeredRuntimeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LayeredRuntimeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLayeredRuntime.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LayeredRuntimeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LayeredRuntimeValidationError{}
+
+// Validate checks the field values on CustomInlineHeader with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CustomInlineHeader) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CustomInlineHeader with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CustomInlineHeaderMultiError, or nil if none found.
+func (m *CustomInlineHeader) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CustomInlineHeader) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetInlineHeaderName()) < 1 {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderName",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_CustomInlineHeader_InlineHeaderName_Pattern.MatchString(m.GetInlineHeaderName()) {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if _, ok := CustomInlineHeader_InlineHeaderType_name[int32(m.GetInlineHeaderType())]; !ok {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderType",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return CustomInlineHeaderMultiError(errors)
+ }
+
+ return nil
+}
+
+// CustomInlineHeaderMultiError is an error wrapping multiple validation errors
+// returned by CustomInlineHeader.ValidateAll() if the designated constraints
+// aren't met.
+type CustomInlineHeaderMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CustomInlineHeaderMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CustomInlineHeaderMultiError) AllErrors() []error { return m }
+
+// CustomInlineHeaderValidationError is the validation error returned by
+// CustomInlineHeader.Validate if the designated constraints aren't met.
+type CustomInlineHeaderValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CustomInlineHeaderValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CustomInlineHeaderValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CustomInlineHeaderValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CustomInlineHeaderValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CustomInlineHeaderValidationError) ErrorName() string {
+ return "CustomInlineHeaderValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CustomInlineHeaderValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCustomInlineHeader.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CustomInlineHeaderValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CustomInlineHeaderValidationError{}
+
+var _CustomInlineHeader_InlineHeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on MemoryAllocatorManager with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *MemoryAllocatorManager) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MemoryAllocatorManager with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MemoryAllocatorManagerMultiError, or nil if none found.
+func (m *MemoryAllocatorManager) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MemoryAllocatorManager) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BytesToRelease
+
+ if all {
+ switch v := interface{}(m.GetMemoryReleaseInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMemoryReleaseInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return MemoryAllocatorManagerMultiError(errors)
+ }
+
+ return nil
+}
+
+// MemoryAllocatorManagerMultiError is an error wrapping multiple validation
+// errors returned by MemoryAllocatorManager.ValidateAll() if the designated
+// constraints aren't met.
+type MemoryAllocatorManagerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MemoryAllocatorManagerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MemoryAllocatorManagerMultiError) AllErrors() []error { return m }
+
+// MemoryAllocatorManagerValidationError is the validation error returned by
+// MemoryAllocatorManager.Validate if the designated constraints aren't met.
+type MemoryAllocatorManagerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MemoryAllocatorManagerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MemoryAllocatorManagerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MemoryAllocatorManagerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MemoryAllocatorManagerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MemoryAllocatorManagerValidationError) ErrorName() string {
+ return "MemoryAllocatorManagerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MemoryAllocatorManagerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMemoryAllocatorManager.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MemoryAllocatorManagerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MemoryAllocatorManagerValidationError{}
+
+// Validate checks the field values on Bootstrap_StaticResources with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_StaticResources) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_StaticResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Bootstrap_StaticResourcesMultiError, or nil if none found.
+func (m *Bootstrap_StaticResources) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_StaticResources) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetListeners() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_StaticResourcesMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_StaticResourcesMultiError is an error wrapping multiple validation
+// errors returned by Bootstrap_StaticResources.ValidateAll() if the
+// designated constraints aren't met.
+type Bootstrap_StaticResourcesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_StaticResourcesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_StaticResourcesMultiError) AllErrors() []error { return m }
+
+// Bootstrap_StaticResourcesValidationError is the validation error returned by
+// Bootstrap_StaticResources.Validate if the designated constraints aren't met.
+type Bootstrap_StaticResourcesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_StaticResourcesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_StaticResourcesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_StaticResourcesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_StaticResourcesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_StaticResourcesValidationError) ErrorName() string {
+ return "Bootstrap_StaticResourcesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_StaticResourcesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_StaticResources.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_StaticResourcesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_StaticResourcesValidationError{}
+
+// Validate checks the field values on Bootstrap_DynamicResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_DynamicResources) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_DynamicResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Bootstrap_DynamicResourcesMultiError, or nil if none found.
+func (m *Bootstrap_DynamicResources) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_DynamicResources) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetLdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for LdsResourcesLocator
+
+ if all {
+ switch v := interface{}(m.GetCdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for CdsResourcesLocator
+
+ if all {
+ switch v := interface{}(m.GetAdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_DynamicResourcesMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_DynamicResourcesMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_DynamicResources.ValidateAll() if
+// the designated constraints aren't met.
+type Bootstrap_DynamicResourcesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_DynamicResourcesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_DynamicResourcesMultiError) AllErrors() []error { return m }
+
+// Bootstrap_DynamicResourcesValidationError is the validation error returned
+// by Bootstrap_DynamicResources.Validate if the designated constraints aren't met.
+type Bootstrap_DynamicResourcesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_DynamicResourcesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_DynamicResourcesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_DynamicResourcesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_DynamicResourcesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_DynamicResourcesValidationError) ErrorName() string {
+ return "Bootstrap_DynamicResourcesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_DynamicResourcesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_DynamicResources.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_DynamicResourcesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_DynamicResourcesValidationError{}
+
+// Validate checks the field values on Bootstrap_ApplicationLogConfig with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_ApplicationLogConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_ApplicationLogConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Bootstrap_ApplicationLogConfigMultiError, or nil if none found.
+func (m *Bootstrap_ApplicationLogConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_ApplicationLogConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetLogFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLogFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_ApplicationLogConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_ApplicationLogConfigMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_ApplicationLogConfig.ValidateAll()
+// if the designated constraints aren't met.
+type Bootstrap_ApplicationLogConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_ApplicationLogConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_ApplicationLogConfigMultiError) AllErrors() []error { return m }
+
+// Bootstrap_ApplicationLogConfigValidationError is the validation error
+// returned by Bootstrap_ApplicationLogConfig.Validate if the designated
+// constraints aren't met.
+type Bootstrap_ApplicationLogConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_ApplicationLogConfigValidationError) ErrorName() string {
+ return "Bootstrap_ApplicationLogConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_ApplicationLogConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_ApplicationLogConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_ApplicationLogConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_ApplicationLogConfigValidationError{}
+
+// Validate checks the field values on Bootstrap_DeferredStatOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_DeferredStatOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_DeferredStatOptions with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Bootstrap_DeferredStatOptionsMultiError, or nil if none found.
+func (m *Bootstrap_DeferredStatOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_DeferredStatOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for EnableDeferredCreationStats
+
+ if len(errors) > 0 {
+ return Bootstrap_DeferredStatOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_DeferredStatOptionsMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_DeferredStatOptions.ValidateAll()
+// if the designated constraints aren't met.
+type Bootstrap_DeferredStatOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_DeferredStatOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_DeferredStatOptionsMultiError) AllErrors() []error { return m }
+
+// Bootstrap_DeferredStatOptionsValidationError is the validation error
+// returned by Bootstrap_DeferredStatOptions.Validate if the designated
+// constraints aren't met.
+type Bootstrap_DeferredStatOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_DeferredStatOptionsValidationError) ErrorName() string {
+ return "Bootstrap_DeferredStatOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_DeferredStatOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_DeferredStatOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_DeferredStatOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_DeferredStatOptionsValidationError{}
+
+// Validate checks the field values on Bootstrap_GrpcAsyncClientManagerConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Bootstrap_GrpcAsyncClientManagerConfig with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// Bootstrap_GrpcAsyncClientManagerConfigMultiError, or nil if none found.
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if d := m.GetMaxCachedEntryIdleDuration(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = Bootstrap_GrpcAsyncClientManagerConfigValidationError{
+ field: "MaxCachedEntryIdleDuration",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(5*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ err := Bootstrap_GrpcAsyncClientManagerConfigValidationError{
+ field: "MaxCachedEntryIdleDuration",
+ reason: "value must be greater than or equal to 5s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_GrpcAsyncClientManagerConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_GrpcAsyncClientManagerConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// Bootstrap_GrpcAsyncClientManagerConfig.ValidateAll() if the designated
+// constraints aren't met.
+type Bootstrap_GrpcAsyncClientManagerConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) AllErrors() []error { return m }
+
+// Bootstrap_GrpcAsyncClientManagerConfigValidationError is the validation
+// error returned by Bootstrap_GrpcAsyncClientManagerConfig.Validate if the
+// designated constraints aren't met.
+type Bootstrap_GrpcAsyncClientManagerConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) ErrorName() string {
+ return "Bootstrap_GrpcAsyncClientManagerConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_GrpcAsyncClientManagerConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_GrpcAsyncClientManagerConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_GrpcAsyncClientManagerConfigValidationError{}
+
+// Validate checks the field values on Bootstrap_ApplicationLogConfig_LogFormat
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Bootstrap_ApplicationLogConfig_LogFormat with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Bootstrap_ApplicationLogConfig_LogFormatMultiError, or nil if none found.
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofLogFormatPresent := false
+ switch v := m.LogFormat.(type) {
+ case *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat:
+ if v == nil {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLogFormatPresent = true
+
+ if all {
+ switch v := interface{}(m.GetJsonFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat:
+ if v == nil {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLogFormatPresent = true
+ // no validation rules for TextFormat
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofLogFormatPresent {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_ApplicationLogConfig_LogFormatMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_ApplicationLogConfig_LogFormatMultiError is an error wrapping
+// multiple validation errors returned by
+// Bootstrap_ApplicationLogConfig_LogFormat.ValidateAll() if the designated
+// constraints aren't met.
+type Bootstrap_ApplicationLogConfig_LogFormatMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) AllErrors() []error { return m }
+
+// Bootstrap_ApplicationLogConfig_LogFormatValidationError is the validation
+// error returned by Bootstrap_ApplicationLogConfig_LogFormat.Validate if the
+// designated constraints aren't met.
+type Bootstrap_ApplicationLogConfig_LogFormatValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) ErrorName() string {
+ return "Bootstrap_ApplicationLogConfig_LogFormatValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_ApplicationLogConfig_LogFormat.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_ApplicationLogConfig_LogFormatValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_ApplicationLogConfig_LogFormatValidationError{}
+
+// Validate checks the field values on ClusterManager_OutlierDetection with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ClusterManager_OutlierDetection) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterManager_OutlierDetection with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ClusterManager_OutlierDetectionMultiError, or nil if none found.
+func (m *ClusterManager_OutlierDetection) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterManager_OutlierDetection) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for EventLogPath
+
+ if all {
+ switch v := interface{}(m.GetEventService()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ClusterManager_OutlierDetectionMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterManager_OutlierDetectionMultiError is an error wrapping multiple
+// validation errors returned by ClusterManager_OutlierDetection.ValidateAll()
+// if the designated constraints aren't met.
+type ClusterManager_OutlierDetectionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterManager_OutlierDetectionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterManager_OutlierDetectionMultiError) AllErrors() []error { return m }
+
+// ClusterManager_OutlierDetectionValidationError is the validation error
+// returned by ClusterManager_OutlierDetection.Validate if the designated
+// constraints aren't met.
+type ClusterManager_OutlierDetectionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterManager_OutlierDetectionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterManager_OutlierDetectionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterManager_OutlierDetectionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterManager_OutlierDetectionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterManager_OutlierDetectionValidationError) ErrorName() string {
+ return "ClusterManager_OutlierDetectionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClusterManager_OutlierDetectionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterManager_OutlierDetection.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterManager_OutlierDetectionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterManager_OutlierDetectionValidationError{}
+
+// Validate checks the field values on Watchdog_WatchdogAction with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Watchdog_WatchdogAction) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdog_WatchdogAction with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Watchdog_WatchdogActionMultiError, or nil if none found.
+func (m *Watchdog_WatchdogAction) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdog_WatchdogAction) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := Watchdog_WatchdogAction_WatchdogEvent_name[int32(m.GetEvent())]; !ok {
+ err := Watchdog_WatchdogActionValidationError{
+ field: "Event",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Watchdog_WatchdogActionMultiError(errors)
+ }
+
+ return nil
+}
+
+// Watchdog_WatchdogActionMultiError is an error wrapping multiple validation
+// errors returned by Watchdog_WatchdogAction.ValidateAll() if the designated
+// constraints aren't met.
+type Watchdog_WatchdogActionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Watchdog_WatchdogActionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Watchdog_WatchdogActionMultiError) AllErrors() []error { return m }
+
+// Watchdog_WatchdogActionValidationError is the validation error returned by
+// Watchdog_WatchdogAction.Validate if the designated constraints aren't met.
+type Watchdog_WatchdogActionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Watchdog_WatchdogActionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Watchdog_WatchdogActionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Watchdog_WatchdogActionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Watchdog_WatchdogActionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Watchdog_WatchdogActionValidationError) ErrorName() string {
+ return "Watchdog_WatchdogActionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Watchdog_WatchdogActionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdog_WatchdogAction.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Watchdog_WatchdogActionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Watchdog_WatchdogActionValidationError{}
+
+// Validate checks the field values on RuntimeLayer_DiskLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_DiskLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_DiskLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_DiskLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_DiskLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_DiskLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for SymlinkRoot
+
+ // no validation rules for Subdirectory
+
+ // no validation rules for AppendServiceCluster
+
+ if len(errors) > 0 {
+ return RuntimeLayer_DiskLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_DiskLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_DiskLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_DiskLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_DiskLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_DiskLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_DiskLayerValidationError is the validation error returned by
+// RuntimeLayer_DiskLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_DiskLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_DiskLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_DiskLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_DiskLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_DiskLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_DiskLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_DiskLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_DiskLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_DiskLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_DiskLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_DiskLayerValidationError{}
+
+// Validate checks the field values on RuntimeLayer_AdminLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_AdminLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_AdminLayer with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_AdminLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_AdminLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_AdminLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return RuntimeLayer_AdminLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_AdminLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_AdminLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_AdminLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_AdminLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_AdminLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_AdminLayerValidationError is the validation error returned by
+// RuntimeLayer_AdminLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_AdminLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_AdminLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_AdminLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_AdminLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_AdminLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_AdminLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_AdminLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_AdminLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_AdminLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_AdminLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_AdminLayerValidationError{}
+
+// Validate checks the field values on RuntimeLayer_RtdsLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_RtdsLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_RtdsLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_RtdsLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_RtdsLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_RtdsLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetRtdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRtdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RuntimeLayer_RtdsLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_RtdsLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_RtdsLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_RtdsLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_RtdsLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_RtdsLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_RtdsLayerValidationError is the validation error returned by
+// RuntimeLayer_RtdsLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_RtdsLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_RtdsLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_RtdsLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_RtdsLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_RtdsLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_RtdsLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_RtdsLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_RtdsLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_RtdsLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_RtdsLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_RtdsLayerValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go
new file mode 100644
index 000000000..51e10e0e0
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go
@@ -0,0 +1,3128 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+ structpb "github.com/planetscale/vtprotobuf/types/known/structpb"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Bootstrap_StaticResources) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_StaticResources) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_StaticResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Secrets) > 0 {
+ for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Secrets[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Secrets[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Clusters[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Clusters[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Listeners) > 0 {
+ for iNdEx := len(m.Listeners) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Listeners[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Listeners[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_DynamicResources) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_DynamicResources) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_DynamicResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.CdsResourcesLocator) > 0 {
+ i -= len(m.CdsResourcesLocator)
+ copy(dAtA[i:], m.CdsResourcesLocator)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CdsResourcesLocator)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.LdsResourcesLocator) > 0 {
+ i -= len(m.LdsResourcesLocator)
+ copy(dAtA[i:], m.LdsResourcesLocator)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LdsResourcesLocator)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.AdsConfig != nil {
+ if vtmsg, ok := interface{}(m.AdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CdsConfig != nil {
+ if vtmsg, ok := interface{}(m.CdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.CdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.LdsConfig != nil {
+ if vtmsg, ok := interface{}(m.LdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.JsonFormat != nil {
+ size, err := (*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.TextFormat)
+ copy(dAtA[i:], m.TextFormat)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
+}
+func (m *Bootstrap_ApplicationLogConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LogFormat != nil {
+ size, err := m.LogFormat.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EnableDeferredCreationStats {
+ i--
+ if m.EnableDeferredCreationStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MaxCachedEntryIdleDuration != nil {
+ size, err := (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MemoryAllocatorManager != nil {
+ size, err := m.MemoryAllocatorManager.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xca
+ }
+ if m.GrpcAsyncClientManagerConfig != nil {
+ size, err := m.GrpcAsyncClientManagerConfig.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc2
+ }
+ if m.DeferredStatOptions != nil {
+ size, err := m.DeferredStatOptions.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xba
+ }
+ if m.ApplicationLogConfig != nil {
+ size, err := m.ApplicationLogConfig.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb2
+ }
+ if m.ListenerManager != nil {
+ if vtmsg, ok := interface{}(m.ListenerManager).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.ListenerManager)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xaa
+ }
+ if m.XdsConfigTrackerExtension != nil {
+ if vtmsg, ok := interface{}(m.XdsConfigTrackerExtension).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.XdsConfigTrackerExtension)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.XdsDelegateExtension != nil {
+ if vtmsg, ok := interface{}(m.XdsDelegateExtension).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.XdsDelegateExtension)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.DefaultRegexEngine != nil {
+ if vtmsg, ok := interface{}(m.DefaultRegexEngine).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DefaultRegexEngine)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x92
+ }
+ if len(m.PerfTracingFilePath) > 0 {
+ i -= len(m.PerfTracingFilePath)
+ copy(dAtA[i:], m.PerfTracingFilePath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PerfTracingFilePath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x8a
+ }
+ if len(m.InlineHeaders) > 0 {
+ for iNdEx := len(m.InlineHeaders) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.InlineHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x82
+ }
+ }
+ if m.TypedDnsResolverConfig != nil {
+ if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.TypedDnsResolverConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xfa
+ }
+ if m.DnsResolutionConfig != nil {
+ if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DnsResolutionConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf2
+ }
+ if msg, ok := m.StatsFlush.(*Bootstrap_StatsFlushOnAdmin); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.FatalActions) > 0 {
+ for iNdEx := len(m.FatalActions) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.FatalActions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe2
+ }
+ }
+ if m.Watchdogs != nil {
+ size, err := m.Watchdogs.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xda
+ }
+ if len(m.NodeContextParams) > 0 {
+ for iNdEx := len(m.NodeContextParams) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.NodeContextParams[iNdEx])
+ copy(dAtA[i:], m.NodeContextParams[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NodeContextParams[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd2
+ }
+ }
+ if len(m.CertificateProviderInstances) > 0 {
+ for k := range m.CertificateProviderInstances {
+ v := m.CertificateProviderInstances[k]
+ baseI := i
+ if vtmsg, ok := interface{}(v).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(v)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xca
+ }
+ }
+ if len(m.DefaultSocketInterface) > 0 {
+ i -= len(m.DefaultSocketInterface)
+ copy(dAtA[i:], m.DefaultSocketInterface)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultSocketInterface)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc2
+ }
+ if m.DefaultConfigSource != nil {
+ if vtmsg, ok := interface{}(m.DefaultConfigSource).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DefaultConfigSource)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xba
+ }
+ if len(m.ConfigSources) > 0 {
+ for iNdEx := len(m.ConfigSources) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.ConfigSources[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.ConfigSources[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb2
+ }
+ }
+ if len(m.BootstrapExtensions) > 0 {
+ for iNdEx := len(m.BootstrapExtensions) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.BootstrapExtensions[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.BootstrapExtensions[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xaa
+ }
+ }
+ if m.UseTcpForDnsLookups {
+ i--
+ if m.UseTcpForDnsLookups {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xa0
+ }
+ if m.StatsServerVersionOverride != nil {
+ size, err := (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ if len(m.HeaderPrefix) > 0 {
+ i -= len(m.HeaderPrefix)
+ copy(dAtA[i:], m.HeaderPrefix)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderPrefix)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.LayeredRuntime != nil {
+ size, err := m.LayeredRuntime.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.EnableDispatcherStats {
+ i--
+ if m.EnableDispatcherStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x80
+ }
+ if m.OverloadManager != nil {
+ if vtmsg, ok := interface{}(m.OverloadManager).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.OverloadManager)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.HdsConfig != nil {
+ if vtmsg, ok := interface{}(m.HdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.HdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.StatsConfig != nil {
+ if vtmsg, ok := interface{}(m.StatsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.StatsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ if m.Admin != nil {
+ size, err := m.Admin.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.Tracing != nil {
+ if vtmsg, ok := interface{}(m.Tracing).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Tracing)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Watchdog != nil {
+ size, err := m.Watchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.StatsFlushInterval != nil {
+ size, err := (*durationpb.Duration)(m.StatsFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.StatsSinks) > 0 {
+ for iNdEx := len(m.StatsSinks) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.StatsSinks[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.StatsSinks[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.FlagsPath) > 0 {
+ i -= len(m.FlagsPath)
+ copy(dAtA[i:], m.FlagsPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FlagsPath)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.ClusterManager != nil {
+ size, err := m.ClusterManager.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.DynamicResources != nil {
+ size, err := m.DynamicResources.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.StaticResources != nil {
+ size, err := m.StaticResources.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Node != nil {
+ if vtmsg, ok := interface{}(m.Node).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Node)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i--
+ if m.StatsFlushOnAdmin {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe8
+ return len(dAtA) - i, nil
+}
+func (m *Admin) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Admin) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Admin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.IgnoreGlobalConnLimit {
+ i--
+ if m.IgnoreGlobalConnLimit {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.AccessLog) > 0 {
+ for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AccessLog[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.SocketOptions) > 0 {
+ for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SocketOptions[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.Address != nil {
+ if vtmsg, ok := interface{}(m.Address).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Address)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ProfilePath) > 0 {
+ i -= len(m.ProfilePath)
+ copy(dAtA[i:], m.ProfilePath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfilePath)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.AccessLogPath) > 0 {
+ i -= len(m.AccessLogPath)
+ copy(dAtA[i:], m.AccessLogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessLogPath)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EventService != nil {
+ if vtmsg, ok := interface{}(m.EventService).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.EventService)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.EventLogPath) > 0 {
+ i -= len(m.EventLogPath)
+ copy(dAtA[i:], m.EventLogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EventLogPath)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterManager) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterManager) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EnableDeferredClusterCreation {
+ i--
+ if m.EnableDeferredClusterCreation {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.LoadStatsConfig != nil {
+ if vtmsg, ok := interface{}(m.LoadStatsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LoadStatsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.UpstreamBindConfig != nil {
+ if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.UpstreamBindConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.OutlierDetection != nil {
+ size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.LocalClusterName) > 0 {
+ i -= len(m.LocalClusterName)
+ copy(dAtA[i:], m.LocalClusterName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LocalClusterName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdogs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdogs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdogs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.WorkerWatchdog != nil {
+ size, err := m.WorkerWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MainThreadWatchdog != nil {
+ size, err := m.MainThreadWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdog_WatchdogAction) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdog_WatchdogAction) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdog_WatchdogAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Event != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Event))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Config != nil {
+ if vtmsg, ok := interface{}(m.Config).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Config)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdog) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdog) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Actions) > 0 {
+ for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.MaxKillTimeoutJitter != nil {
+ size, err := (*durationpb.Duration)(m.MaxKillTimeoutJitter).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.MultikillThreshold != nil {
+ if vtmsg, ok := interface{}(m.MultikillThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.MultikillThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.MultikillTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MultikillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.KillTimeout != nil {
+ size, err := (*durationpb.Duration)(m.KillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MegamissTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MegamissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MissTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *FatalAction) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FatalAction) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *FatalAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Config != nil {
+ if vtmsg, ok := interface{}(m.Config).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Config)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Runtime) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Runtime) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Runtime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Base != nil {
+ size, err := (*structpb.Struct)(m.Base).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.OverrideSubdirectory) > 0 {
+ i -= len(m.OverrideSubdirectory)
+ copy(dAtA[i:], m.OverrideSubdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideSubdirectory)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Subdirectory) > 0 {
+ i -= len(m.Subdirectory)
+ copy(dAtA[i:], m.Subdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.SymlinkRoot) > 0 {
+ i -= len(m.SymlinkRoot)
+ copy(dAtA[i:], m.SymlinkRoot)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Subdirectory) > 0 {
+ i -= len(m.Subdirectory)
+ copy(dAtA[i:], m.Subdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.AppendServiceCluster {
+ i--
+ if m.AppendServiceCluster {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.SymlinkRoot) > 0 {
+ i -= len(m.SymlinkRoot)
+ copy(dAtA[i:], m.SymlinkRoot)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.RtdsConfig != nil {
+ if vtmsg, ok := interface{}(m.RtdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.RtdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_RtdsLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_AdminLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_DiskLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_StaticLayer); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_StaticLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_StaticLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.StaticLayer != nil {
+ size, err := (*structpb.Struct)(m.StaticLayer).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_DiskLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_DiskLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DiskLayer != nil {
+ size, err := m.DiskLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_AdminLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_AdminLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AdminLayer != nil {
+ size, err := m.AdminLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_RtdsLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_RtdsLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RtdsLayer != nil {
+ size, err := m.RtdsLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x2a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *LayeredRuntime) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LayeredRuntime) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *LayeredRuntime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Layers) > 0 {
+ for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Layers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CustomInlineHeader) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomInlineHeader) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CustomInlineHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.InlineHeaderType != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InlineHeaderType))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.InlineHeaderName) > 0 {
+ i -= len(m.InlineHeaderName)
+ copy(dAtA[i:], m.InlineHeaderName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineHeaderName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemoryAllocatorManager) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemoryAllocatorManager) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MemoryAllocatorManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MemoryReleaseInterval != nil {
+ size, err := (*durationpb.Duration)(m.MemoryReleaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.BytesToRelease != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesToRelease))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_StaticResources) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Listeners) > 0 {
+ for _, e := range m.Listeners {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for _, e := range m.Clusters {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.Secrets) > 0 {
+ for _, e := range m.Secrets {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_DynamicResources) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LdsConfig != nil {
+ if size, ok := interface{}(m.LdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CdsConfig != nil {
+ if size, ok := interface{}(m.CdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.CdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AdsConfig != nil {
+ if size, ok := interface{}(m.AdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.AdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LdsResourcesLocator)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.CdsResourcesLocator)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.LogFormat.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.JsonFormat != nil {
+ l = (*structpb.Struct)(m.JsonFormat).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.TextFormat)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *Bootstrap_ApplicationLogConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LogFormat != nil {
+ l = m.LogFormat.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_DeferredStatOptions) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.EnableDeferredCreationStats {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxCachedEntryIdleDuration != nil {
+ l = (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Node != nil {
+ if size, ok := interface{}(m.Node).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Node)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StaticResources != nil {
+ l = m.StaticResources.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DynamicResources != nil {
+ l = m.DynamicResources.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClusterManager != nil {
+ l = m.ClusterManager.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.FlagsPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StatsSinks) > 0 {
+ for _, e := range m.StatsSinks {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.StatsFlushInterval != nil {
+ l = (*durationpb.Duration)(m.StatsFlushInterval).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Watchdog != nil {
+ l = m.Watchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Tracing != nil {
+ if size, ok := interface{}(m.Tracing).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Tracing)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Admin != nil {
+ l = m.Admin.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StatsConfig != nil {
+ if size, ok := interface{}(m.StatsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.StatsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.HdsConfig != nil {
+ if size, ok := interface{}(m.HdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.HdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OverloadManager != nil {
+ if size, ok := interface{}(m.OverloadManager).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.OverloadManager)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EnableDispatcherStats {
+ n += 3
+ }
+ if m.LayeredRuntime != nil {
+ l = m.LayeredRuntime.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.HeaderPrefix)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StatsServerVersionOverride != nil {
+ l = (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UseTcpForDnsLookups {
+ n += 3
+ }
+ if len(m.BootstrapExtensions) > 0 {
+ for _, e := range m.BootstrapExtensions {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.ConfigSources) > 0 {
+ for _, e := range m.ConfigSources {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.DefaultConfigSource != nil {
+ if size, ok := interface{}(m.DefaultConfigSource).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DefaultConfigSource)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.DefaultSocketInterface)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.CertificateProviderInstances) > 0 {
+ for k, v := range m.CertificateProviderInstances {
+ _ = k
+ _ = v
+ l = 0
+ if v != nil {
+ if size, ok := interface{}(v).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(v)
+ }
+ }
+ l += 1 + protohelpers.SizeOfVarint(uint64(l))
+ mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l
+ n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize))
+ }
+ }
+ if len(m.NodeContextParams) > 0 {
+ for _, s := range m.NodeContextParams {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.Watchdogs != nil {
+ l = m.Watchdogs.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.FatalActions) > 0 {
+ for _, e := range m.FatalActions {
+ l = e.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if vtmsg, ok := m.StatsFlush.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ if m.DnsResolutionConfig != nil {
+ if size, ok := interface{}(m.DnsResolutionConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DnsResolutionConfig)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TypedDnsResolverConfig != nil {
+ if size, ok := interface{}(m.TypedDnsResolverConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.TypedDnsResolverConfig)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.InlineHeaders) > 0 {
+ for _, e := range m.InlineHeaders {
+ l = e.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ l = len(m.PerfTracingFilePath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DefaultRegexEngine != nil {
+ if size, ok := interface{}(m.DefaultRegexEngine).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DefaultRegexEngine)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.XdsDelegateExtension != nil {
+ if size, ok := interface{}(m.XdsDelegateExtension).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.XdsDelegateExtension)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.XdsConfigTrackerExtension != nil {
+ if size, ok := interface{}(m.XdsConfigTrackerExtension).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.XdsConfigTrackerExtension)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ListenerManager != nil {
+ if size, ok := interface{}(m.ListenerManager).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.ListenerManager)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ApplicationLogConfig != nil {
+ l = m.ApplicationLogConfig.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DeferredStatOptions != nil {
+ l = m.DeferredStatOptions.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.GrpcAsyncClientManagerConfig != nil {
+ l = m.GrpcAsyncClientManagerConfig.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MemoryAllocatorManager != nil {
+ l = m.MemoryAllocatorManager.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 3
+ return n
+}
+func (m *Admin) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.AccessLogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ProfilePath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Address != nil {
+ if size, ok := interface{}(m.Address).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Address)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.SocketOptions) > 0 {
+ for _, e := range m.SocketOptions {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.AccessLog) > 0 {
+ for _, e := range m.AccessLog {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.IgnoreGlobalConnLimit {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterManager_OutlierDetection) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.EventLogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EventService != nil {
+ if size, ok := interface{}(m.EventService).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.EventService)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterManager) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.LocalClusterName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OutlierDetection != nil {
+ l = m.OutlierDetection.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UpstreamBindConfig != nil {
+ if size, ok := interface{}(m.UpstreamBindConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.UpstreamBindConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LoadStatsConfig != nil {
+ if size, ok := interface{}(m.LoadStatsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LoadStatsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EnableDeferredClusterCreation {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdogs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MainThreadWatchdog != nil {
+ l = m.MainThreadWatchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.WorkerWatchdog != nil {
+ l = m.WorkerWatchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdog_WatchdogAction) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Config != nil {
+ if size, ok := interface{}(m.Config).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Config)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Event != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Event))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdog) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MissTimeout != nil {
+ l = (*durationpb.Duration)(m.MissTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MegamissTimeout != nil {
+ l = (*durationpb.Duration)(m.MegamissTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.KillTimeout != nil {
+ l = (*durationpb.Duration)(m.KillTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MultikillTimeout != nil {
+ l = (*durationpb.Duration)(m.MultikillTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MultikillThreshold != nil {
+ if size, ok := interface{}(m.MultikillThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.MultikillThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxKillTimeoutJitter != nil {
+ l = (*durationpb.Duration)(m.MaxKillTimeoutJitter).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.Actions) > 0 {
+ for _, e := range m.Actions {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *FatalAction) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Config != nil {
+ if size, ok := interface{}(m.Config).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Config)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Runtime) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SymlinkRoot)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.Subdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.OverrideSubdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Base != nil {
+ l = (*structpb.Struct)(m.Base).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_DiskLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SymlinkRoot)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AppendServiceCluster {
+ n += 2
+ }
+ l = len(m.Subdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_AdminLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_RtdsLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RtdsConfig != nil {
+ if size, ok := interface{}(m.RtdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.RtdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.LayerSpecifier.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_StaticLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.StaticLayer != nil {
+ l = (*structpb.Struct)(m.StaticLayer).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_DiskLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DiskLayer != nil {
+ l = m.DiskLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_AdminLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AdminLayer != nil {
+ l = m.AdminLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_RtdsLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RtdsLayer != nil {
+ l = m.RtdsLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *LayeredRuntime) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Layers) > 0 {
+ for _, e := range m.Layers {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CustomInlineHeader) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.InlineHeaderName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.InlineHeaderType != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.InlineHeaderType))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *MemoryAllocatorManager) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BytesToRelease != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesToRelease))
+ }
+ if m.MemoryReleaseInterval != nil {
+ l = (*durationpb.Duration)(m.MemoryReleaseInterval).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go
new file mode 100644
index 000000000..e70e27b1f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go
@@ -0,0 +1,507 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// :ref:`Circuit breaking` settings can be
+// specified individually for each defined priority.
+type CircuitBreakers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If multiple :ref:`Thresholds`
+ // are defined with the same :ref:`RoutingPriority`,
+ // the first one in the list is used. If no Thresholds is defined for a given
+ // :ref:`RoutingPriority`, the default values
+ // are used.
+ Thresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,1,rep,name=thresholds,proto3" json:"thresholds,omitempty"`
+ // Optional per-host limits which apply to each individual host in a cluster.
+ //
+ // .. note::
+ //
+ // currently only the :ref:`max_connections
+ // ` field is supported for per-host limits.
+ //
+ // If multiple per-host :ref:`Thresholds`
+ // are defined with the same :ref:`RoutingPriority`,
+ // the first one in the list is used. If no per-host Thresholds are defined for a given
+ // :ref:`RoutingPriority`,
+ // the cluster will not have per-host limits.
+ PerHostThresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,2,rep,name=per_host_thresholds,json=perHostThresholds,proto3" json:"per_host_thresholds,omitempty"`
+}
+
+func (x *CircuitBreakers) Reset() {
+ *x = CircuitBreakers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers) ProtoMessage() {}
+
+func (x *CircuitBreakers) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CircuitBreakers) GetThresholds() []*CircuitBreakers_Thresholds {
+ if x != nil {
+ return x.Thresholds
+ }
+ return nil
+}
+
+func (x *CircuitBreakers) GetPerHostThresholds() []*CircuitBreakers_Thresholds {
+ if x != nil {
+ return x.PerHostThresholds
+ }
+ return nil
+}
+
+// A Thresholds defines CircuitBreaker settings for a
+// :ref:`RoutingPriority`.
+// [#next-free-field: 9]
+type CircuitBreakers_Thresholds struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The :ref:`RoutingPriority`
+ // the specified CircuitBreaker settings apply to.
+ Priority v3.RoutingPriority `protobuf:"varint,1,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"`
+ // The maximum number of connections that Envoy will make to the upstream
+ // cluster. If not specified, the default is 1024.
+ MaxConnections *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"`
+ // The maximum number of pending requests that Envoy will allow to the
+ // upstream cluster. If not specified, the default is 1024.
+ // This limit is applied as a connection limit for non-HTTP traffic.
+ MaxPendingRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"`
+ // The maximum number of parallel requests that Envoy will make to the
+ // upstream cluster. If not specified, the default is 1024.
+ // This limit does not apply to non-HTTP traffic.
+ MaxRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"`
+ // The maximum number of parallel retries that Envoy will allow to the
+ // upstream cluster. If not specified, the default is 3.
+ MaxRetries *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"`
+ // Specifies a limit on concurrent retries in relation to the number of active requests. This
+ // parameter is optional.
+ //
+ // .. note::
+ //
+ // If this field is set, the retry budget will override any configured retry circuit
+ // breaker.
+ RetryBudget *CircuitBreakers_Thresholds_RetryBudget `protobuf:"bytes,8,opt,name=retry_budget,json=retryBudget,proto3" json:"retry_budget,omitempty"`
+ // If track_remaining is true, then stats will be published that expose
+ // the number of resources remaining until the circuit breakers open. If
+ // not specified, the default is false.
+ //
+ // .. note::
+ //
+ // If a retry budget is used in lieu of the max_retries circuit breaker,
+ // the remaining retry resources remaining will not be tracked.
+ TrackRemaining bool `protobuf:"varint,6,opt,name=track_remaining,json=trackRemaining,proto3" json:"track_remaining,omitempty"`
+ // The maximum number of connection pools per cluster that Envoy will concurrently support at
+ // once. If not specified, the default is unlimited. Set this for clusters which create a
+ // large number of connection pools. See
+ // :ref:`Circuit Breaking ` for
+ // more details.
+ MaxConnectionPools *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"`
+}
+
+func (x *CircuitBreakers_Thresholds) Reset() {
+ *x = CircuitBreakers_Thresholds{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers_Thresholds) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers_Thresholds) ProtoMessage() {}
+
+func (x *CircuitBreakers_Thresholds) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers_Thresholds.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers_Thresholds) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *CircuitBreakers_Thresholds) GetPriority() v3.RoutingPriority {
+ if x != nil {
+ return x.Priority
+ }
+ return v3.RoutingPriority(0)
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConnections
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxPendingRequests
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxRequests
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxRetries
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetRetryBudget() *CircuitBreakers_Thresholds_RetryBudget {
+ if x != nil {
+ return x.RetryBudget
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetTrackRemaining() bool {
+ if x != nil {
+ return x.TrackRemaining
+ }
+ return false
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConnectionPools
+ }
+ return nil
+}
+
+type CircuitBreakers_Thresholds_RetryBudget struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the limit on concurrent retries as a percentage of the sum of active requests and
+ // active pending requests. For example, if there are 100 active requests and the
+ // budget_percent is set to 25, there may be 25 active retries.
+ //
+ // This parameter is optional. Defaults to 20%.
+ BudgetPercent *v31.Percent `protobuf:"bytes,1,opt,name=budget_percent,json=budgetPercent,proto3" json:"budget_percent,omitempty"`
+ // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the
+ // number of active retries may never go below this number.
+ //
+ // This parameter is optional. Defaults to 3.
+ MinRetryConcurrency *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"`
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) Reset() {
+ *x = CircuitBreakers_Thresholds_RetryBudget{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers_Thresholds_RetryBudget) ProtoMessage() {}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers_Thresholds_RetryBudget.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers_Thresholds_RetryBudget) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) GetBudgetPercent() *v31.Percent {
+ if x != nil {
+ return x.BudgetPercent
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MinRetryConcurrency
+ }
+ return nil
+}
+
+var File_envoy_config_cluster_v3_circuit_breaker_proto protoreflect.FileDescriptor
+
+var file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0xe5, 0x08, 0x0a, 0x0f, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65,
+ 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+ 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b,
+ 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x0a,
+ 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x63, 0x0a, 0x13, 0x70, 0x65,
+ 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
+ 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x11, 0x70, 0x65,
+ 0x72, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x1a,
+ 0xea, 0x06, 0x0a, 0x0a, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4b,
+ 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50,
+ 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x6d,
+ 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12,
+ 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67,
+ 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65,
+ 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65,
+ 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79,
+ 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f,
+ 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12,
+ 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x1a,
+ 0xe2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12,
+ 0x3d, 0x0a, 0x0e, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52,
+ 0x0d, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x50,
+ 0x0a, 0x15, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6d, 0x69, 0x6e,
+ 0x52, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69,
+ 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68,
+ 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75,
+ 0x64, 0x67, 0x65, 0x74, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
+ 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x3a, 0x2b, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x43, 0x69, 0x72,
+ 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f,
+ 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce sync.Once
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc
+)
+
+func file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP() []byte {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce.Do(func() {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData)
+ })
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData
+}
+
+var file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = []interface{}{
+ (*CircuitBreakers)(nil), // 0: envoy.config.cluster.v3.CircuitBreakers
+ (*CircuitBreakers_Thresholds)(nil), // 1: envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ (*CircuitBreakers_Thresholds_RetryBudget)(nil), // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget
+ (v3.RoutingPriority)(0), // 3: envoy.config.core.v3.RoutingPriority
+ (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value
+ (*v31.Percent)(nil), // 5: envoy.type.v3.Percent
+}
+var file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.cluster.v3.CircuitBreakers.thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ 1, // 1: envoy.config.cluster.v3.CircuitBreakers.per_host_thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ 3, // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.priority:type_name -> envoy.config.core.v3.RoutingPriority
+ 4, // 3: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connections:type_name -> google.protobuf.UInt32Value
+ 4, // 4: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests:type_name -> google.protobuf.UInt32Value
+ 4, // 5: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_requests:type_name -> google.protobuf.UInt32Value
+ 4, // 6: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_retries:type_name -> google.protobuf.UInt32Value
+ 2, // 7: envoy.config.cluster.v3.CircuitBreakers.Thresholds.retry_budget:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget
+ 4, // 8: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connection_pools:type_name -> google.protobuf.UInt32Value
+ 5, // 9: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.budget_percent:type_name -> envoy.type.v3.Percent
+ 4, // 10: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.min_retry_concurrency:type_name -> google.protobuf.UInt32Value
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_cluster_v3_circuit_breaker_proto_init() }
+func file_envoy_config_cluster_v3_circuit_breaker_proto_init() {
+ if File_envoy_config_cluster_v3_circuit_breaker_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers_Thresholds); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers_Thresholds_RetryBudget); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes,
+ DependencyIndexes: file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs,
+ MessageInfos: file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes,
+ }.Build()
+ File_envoy_config_cluster_v3_circuit_breaker_proto = out.File
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = nil
+ file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = nil
+ file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go
new file mode 100644
index 000000000..8bf3373be
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go
@@ -0,0 +1,662 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.RoutingPriority(0)
+)
+
+// Validate checks the field values on CircuitBreakers with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CircuitBreakers) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CircuitBreakers with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CircuitBreakersMultiError, or nil if none found.
+func (m *CircuitBreakers) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetThresholds() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetPerHostThresholds() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakersMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakersMultiError is an error wrapping multiple validation errors
+// returned by CircuitBreakers.ValidateAll() if the designated constraints
+// aren't met.
+type CircuitBreakersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakersMultiError) AllErrors() []error { return m }
+
+// CircuitBreakersValidationError is the validation error returned by
+// CircuitBreakers.Validate if the designated constraints aren't met.
+type CircuitBreakersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakersValidationError) ErrorName() string { return "CircuitBreakersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakersValidationError{}
+
+// Validate checks the field values on CircuitBreakers_Thresholds with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CircuitBreakers_Thresholds) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CircuitBreakers_Thresholds with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CircuitBreakers_ThresholdsMultiError, or nil if none found.
+func (m *CircuitBreakers_Thresholds) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers_Thresholds) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok {
+ err := CircuitBreakers_ThresholdsValidationError{
+ field: "Priority",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxConnections()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConnections()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxPendingRequests()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxPendingRequests()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxRequests()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxRequests()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxRetries()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxRetries()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetRetryBudget()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRetryBudget()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for TrackRemaining
+
+ if all {
+ switch v := interface{}(m.GetMaxConnectionPools()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConnectionPools()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakers_ThresholdsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakers_ThresholdsMultiError is an error wrapping multiple
+// validation errors returned by CircuitBreakers_Thresholds.ValidateAll() if
+// the designated constraints aren't met.
+type CircuitBreakers_ThresholdsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakers_ThresholdsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakers_ThresholdsMultiError) AllErrors() []error { return m }
+
+// CircuitBreakers_ThresholdsValidationError is the validation error returned
+// by CircuitBreakers_Thresholds.Validate if the designated constraints aren't met.
+type CircuitBreakers_ThresholdsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakers_ThresholdsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakers_ThresholdsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakers_ThresholdsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakers_ThresholdsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakers_ThresholdsValidationError) ErrorName() string {
+ return "CircuitBreakers_ThresholdsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakers_ThresholdsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers_Thresholds.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakers_ThresholdsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakers_ThresholdsValidationError{}
+
+// Validate checks the field values on CircuitBreakers_Thresholds_RetryBudget
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *CircuitBreakers_Thresholds_RetryBudget) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// CircuitBreakers_Thresholds_RetryBudget with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// CircuitBreakers_Thresholds_RetryBudgetMultiError, or nil if none found.
+func (m *CircuitBreakers_Thresholds_RetryBudget) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetBudgetPercent()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBudgetPercent()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMinRetryConcurrency()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMinRetryConcurrency()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakers_Thresholds_RetryBudgetMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakers_Thresholds_RetryBudgetMultiError is an error wrapping
+// multiple validation errors returned by
+// CircuitBreakers_Thresholds_RetryBudget.ValidateAll() if the designated
+// constraints aren't met.
+type CircuitBreakers_Thresholds_RetryBudgetMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) AllErrors() []error { return m }
+
+// CircuitBreakers_Thresholds_RetryBudgetValidationError is the validation
+// error returned by CircuitBreakers_Thresholds_RetryBudget.Validate if the
+// designated constraints aren't met.
+type CircuitBreakers_Thresholds_RetryBudgetValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) ErrorName() string {
+ return "CircuitBreakers_Thresholds_RetryBudgetValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers_Thresholds_RetryBudget.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakers_Thresholds_RetryBudgetValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakers_Thresholds_RetryBudgetValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go
new file mode 100644
index 000000000..14ca0a1f1
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go
@@ -0,0 +1,337 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MinRetryConcurrency != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.BudgetPercent != nil {
+ if vtmsg, ok := interface{}(m.BudgetPercent).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.BudgetPercent)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.RetryBudget != nil {
+ size, err := m.RetryBudget.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.MaxConnectionPools != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionPools).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.TrackRemaining {
+ i--
+ if m.TrackRemaining {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.MaxRetries != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxRetries).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.MaxRequests != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxRequests).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.MaxPendingRequests != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxPendingRequests).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MaxConnections != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxConnections).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Priority != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.PerHostThresholds) > 0 {
+ for iNdEx := len(m.PerHostThresholds) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.PerHostThresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Thresholds) > 0 {
+ for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BudgetPercent != nil {
+ if size, ok := interface{}(m.BudgetPercent).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.BudgetPercent)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MinRetryConcurrency != nil {
+ l = (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CircuitBreakers_Thresholds) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Priority != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority))
+ }
+ if m.MaxConnections != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxConnections).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxPendingRequests != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxPendingRequests).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxRequests != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxRequests).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxRetries != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxRetries).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TrackRemaining {
+ n += 2
+ }
+ if m.MaxConnectionPools != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxConnectionPools).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RetryBudget != nil {
+ l = m.RetryBudget.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CircuitBreakers) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Thresholds) > 0 {
+ for _, e := range m.Thresholds {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.PerHostThresholds) > 0 {
+ for _, e := range m.PerHostThresholds {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
new file mode 100644
index 000000000..a2fb08949
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
@@ -0,0 +1,4698 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/cluster/v3/cluster.proto
+
+package clusterv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Refer to :ref:`service discovery type `
+// for an explanation on each type.
+type Cluster_DiscoveryType int32
+
+const (
+ // Refer to the :ref:`static discovery type`
+ // for an explanation.
+ Cluster_STATIC Cluster_DiscoveryType = 0
+ // Refer to the :ref:`strict DNS discovery
+ // type`
+ // for an explanation.
+ Cluster_STRICT_DNS Cluster_DiscoveryType = 1
+ // Refer to the :ref:`logical DNS discovery
+ // type`
+ // for an explanation.
+ Cluster_LOGICAL_DNS Cluster_DiscoveryType = 2
+ // Refer to the :ref:`service discovery type`
+ // for an explanation.
+ Cluster_EDS Cluster_DiscoveryType = 3
+ // Refer to the :ref:`original destination discovery
+ // type`
+ // for an explanation.
+ Cluster_ORIGINAL_DST Cluster_DiscoveryType = 4
+)
+
+// Enum value maps for Cluster_DiscoveryType.
+var (
+ Cluster_DiscoveryType_name = map[int32]string{
+ 0: "STATIC",
+ 1: "STRICT_DNS",
+ 2: "LOGICAL_DNS",
+ 3: "EDS",
+ 4: "ORIGINAL_DST",
+ }
+ Cluster_DiscoveryType_value = map[string]int32{
+ "STATIC": 0,
+ "STRICT_DNS": 1,
+ "LOGICAL_DNS": 2,
+ "EDS": 3,
+ "ORIGINAL_DST": 4,
+ }
+)
+
+func (x Cluster_DiscoveryType) Enum() *Cluster_DiscoveryType {
+ p := new(Cluster_DiscoveryType)
+ *p = x
+ return p
+}
+
+func (x Cluster_DiscoveryType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_DiscoveryType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[0].Descriptor()
+}
+
+func (Cluster_DiscoveryType) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[0]
+}
+
+func (x Cluster_DiscoveryType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_DiscoveryType.Descriptor instead.
+func (Cluster_DiscoveryType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Refer to :ref:`load balancer type ` architecture
+// overview section for information on each type.
+type Cluster_LbPolicy int32
+
+const (
+ // Refer to the :ref:`round robin load balancing
+ // policy`
+ // for an explanation.
+ Cluster_ROUND_ROBIN Cluster_LbPolicy = 0
+ // Refer to the :ref:`least request load balancing
+ // policy`
+ // for an explanation.
+ Cluster_LEAST_REQUEST Cluster_LbPolicy = 1
+ // Refer to the :ref:`ring hash load balancing
+ // policy`
+ // for an explanation.
+ Cluster_RING_HASH Cluster_LbPolicy = 2
+ // Refer to the :ref:`random load balancing
+ // policy`
+ // for an explanation.
+ Cluster_RANDOM Cluster_LbPolicy = 3
+ // Refer to the :ref:`Maglev load balancing policy`
+ // for an explanation.
+ Cluster_MAGLEV Cluster_LbPolicy = 5
+ // This load balancer type must be specified if the configured cluster provides a cluster
+ // specific load balancer. Consult the configured cluster's documentation for whether to set
+ // this option or not.
+ Cluster_CLUSTER_PROVIDED Cluster_LbPolicy = 6
+ // Use the new :ref:`load_balancing_policy
+ // ` field to determine the LB policy.
+ // This has been deprecated in favor of using the :ref:`load_balancing_policy
+ // ` field without
+ // setting any value in :ref:`lb_policy`.
+ Cluster_LOAD_BALANCING_POLICY_CONFIG Cluster_LbPolicy = 7
+)
+
+// Enum value maps for Cluster_LbPolicy.
+var (
+ Cluster_LbPolicy_name = map[int32]string{
+ 0: "ROUND_ROBIN",
+ 1: "LEAST_REQUEST",
+ 2: "RING_HASH",
+ 3: "RANDOM",
+ 5: "MAGLEV",
+ 6: "CLUSTER_PROVIDED",
+ 7: "LOAD_BALANCING_POLICY_CONFIG",
+ }
+ Cluster_LbPolicy_value = map[string]int32{
+ "ROUND_ROBIN": 0,
+ "LEAST_REQUEST": 1,
+ "RING_HASH": 2,
+ "RANDOM": 3,
+ "MAGLEV": 5,
+ "CLUSTER_PROVIDED": 6,
+ "LOAD_BALANCING_POLICY_CONFIG": 7,
+ }
+)
+
+func (x Cluster_LbPolicy) Enum() *Cluster_LbPolicy {
+ p := new(Cluster_LbPolicy)
+ *p = x
+ return p
+}
+
+func (x Cluster_LbPolicy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_LbPolicy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[1].Descriptor()
+}
+
+func (Cluster_LbPolicy) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[1]
+}
+
+func (x Cluster_LbPolicy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_LbPolicy.Descriptor instead.
+func (Cluster_LbPolicy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 1}
+}
+
+// When V4_ONLY is selected, the DNS resolver will only perform a lookup for
+// addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
+// only perform a lookup for addresses in the IPv6 family. If AUTO is
+// specified, the DNS resolver will first perform a lookup for addresses in
+// the IPv6 family and fallback to a lookup for addresses in the IPv4 family.
+// This is semantically equivalent to a non-existent V6_PREFERRED option.
+// AUTO is a legacy name that is more opaque than
+// necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
+// If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
+// IPv4 family and fallback to a lookup for addresses in the IPv6 family. i.e., the callback
+// target will only get v6 addresses if there were NO v4 addresses to return.
+// If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families,
+// and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for
+// upstream connections. Refer to :ref:`Happy Eyeballs Support `
+// for more information.
+// For cluster types other than
+// :ref:`STRICT_DNS` and
+// :ref:`LOGICAL_DNS`,
+// this setting is
+// ignored.
+// [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
+type Cluster_DnsLookupFamily int32
+
+const (
+ Cluster_AUTO Cluster_DnsLookupFamily = 0
+ Cluster_V4_ONLY Cluster_DnsLookupFamily = 1
+ Cluster_V6_ONLY Cluster_DnsLookupFamily = 2
+ Cluster_V4_PREFERRED Cluster_DnsLookupFamily = 3
+ Cluster_ALL Cluster_DnsLookupFamily = 4
+)
+
+// Enum value maps for Cluster_DnsLookupFamily.
+var (
+ Cluster_DnsLookupFamily_name = map[int32]string{
+ 0: "AUTO",
+ 1: "V4_ONLY",
+ 2: "V6_ONLY",
+ 3: "V4_PREFERRED",
+ 4: "ALL",
+ }
+ Cluster_DnsLookupFamily_value = map[string]int32{
+ "AUTO": 0,
+ "V4_ONLY": 1,
+ "V6_ONLY": 2,
+ "V4_PREFERRED": 3,
+ "ALL": 4,
+ }
+)
+
+func (x Cluster_DnsLookupFamily) Enum() *Cluster_DnsLookupFamily {
+ p := new(Cluster_DnsLookupFamily)
+ *p = x
+ return p
+}
+
+func (x Cluster_DnsLookupFamily) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_DnsLookupFamily) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[2].Descriptor()
+}
+
+func (Cluster_DnsLookupFamily) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[2]
+}
+
+func (x Cluster_DnsLookupFamily) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_DnsLookupFamily.Descriptor instead.
+func (Cluster_DnsLookupFamily) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 2}
+}
+
+type Cluster_ClusterProtocolSelection int32
+
+const (
+ // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
+ // If :ref:`http2_protocol_options ` are
+ // present, HTTP2 will be used, otherwise HTTP1.1 will be used.
+ Cluster_USE_CONFIGURED_PROTOCOL Cluster_ClusterProtocolSelection = 0
+ // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
+ Cluster_USE_DOWNSTREAM_PROTOCOL Cluster_ClusterProtocolSelection = 1
+)
+
+// Enum value maps for Cluster_ClusterProtocolSelection.
+var (
+ Cluster_ClusterProtocolSelection_name = map[int32]string{
+ 0: "USE_CONFIGURED_PROTOCOL",
+ 1: "USE_DOWNSTREAM_PROTOCOL",
+ }
+ Cluster_ClusterProtocolSelection_value = map[string]int32{
+ "USE_CONFIGURED_PROTOCOL": 0,
+ "USE_DOWNSTREAM_PROTOCOL": 1,
+ }
+)
+
+func (x Cluster_ClusterProtocolSelection) Enum() *Cluster_ClusterProtocolSelection {
+ p := new(Cluster_ClusterProtocolSelection)
+ *p = x
+ return p
+}
+
+func (x Cluster_ClusterProtocolSelection) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_ClusterProtocolSelection) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[3].Descriptor()
+}
+
+func (Cluster_ClusterProtocolSelection) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[3]
+}
+
+func (x Cluster_ClusterProtocolSelection) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_ClusterProtocolSelection.Descriptor instead.
+func (Cluster_ClusterProtocolSelection) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3}
+}
+
+// If NO_FALLBACK is selected, a result
+// equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
+// any cluster endpoint may be returned (subject to policy, health checks,
+// etc). If DEFAULT_SUBSET is selected, load balancing is performed over the
+// endpoints matching the values from the default_subset field.
+type Cluster_LbSubsetConfig_LbSubsetFallbackPolicy int32
+
+const (
+ Cluster_LbSubsetConfig_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 0
+ Cluster_LbSubsetConfig_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 1
+ Cluster_LbSubsetConfig_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetFallbackPolicy = 2
+)
+
+// Enum value maps for Cluster_LbSubsetConfig_LbSubsetFallbackPolicy.
+var (
+ Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_name = map[int32]string{
+ 0: "NO_FALLBACK",
+ 1: "ANY_ENDPOINT",
+ 2: "DEFAULT_SUBSET",
+ }
+ Cluster_LbSubsetConfig_LbSubsetFallbackPolicy_value = map[string]int32{
+ "NO_FALLBACK": 0,
+ "ANY_ENDPOINT": 1,
+ "DEFAULT_SUBSET": 2,
+ }
+)
+
+func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetFallbackPolicy {
+ p := new(Cluster_LbSubsetConfig_LbSubsetFallbackPolicy)
+ *p = x
+ return p
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[4].Descriptor()
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[4]
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetFallbackPolicy.Descriptor instead.
+func (Cluster_LbSubsetConfig_LbSubsetFallbackPolicy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0}
+}
+
+type Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy int32
+
+const (
+ // No fallback. Route metadata will be used as-is.
+ Cluster_LbSubsetConfig_METADATA_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 0
+ // A special metadata key “fallback_list“ will be used to provide variants of metadata to try.
+ // Value of “fallback_list“ key has to be a list. Every list element has to be a struct - it will
+ // be merged with route metadata, overriding keys that appear in both places.
+ // “fallback_list“ entries will be used in order until a host is found.
+ //
+ // “fallback_list“ key itself is removed from metadata before subset load balancing is performed.
+ //
+ // Example:
+ //
+ // for metadata:
+ //
+ // .. code-block:: yaml
+ //
+ // version: 1.0
+ // fallback_list:
+ // - version: 2.0
+ // hardware: c64
+ // - hardware: c32
+ // - version: 3.0
+ //
+ // at first, metadata:
+ //
+ // .. code-block:: json
+ //
+ // {"version": "2.0", "hardware": "c64"}
+ //
+ // will be used for load balancing. If no host is found, metadata:
+ //
+ // .. code-block:: json
+ //
+ // {"version": "1.0", "hardware": "c32"}
+ //
+ // is next to try. If it still results in no host, finally metadata:
+ //
+ // .. code-block:: json
+ //
+ // {"version": "3.0"}
+ //
+ // is used.
+ Cluster_LbSubsetConfig_FALLBACK_LIST Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy = 1
+)
+
+// Enum value maps for Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy.
+var (
+ Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_name = map[int32]string{
+ 0: "METADATA_NO_FALLBACK",
+ 1: "FALLBACK_LIST",
+ }
+ Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy_value = map[string]int32{
+ "METADATA_NO_FALLBACK": 0,
+ "FALLBACK_LIST": 1,
+ }
+)
+
+func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy {
+ p := new(Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy)
+ *p = x
+ return p
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[5].Descriptor()
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[5]
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy.Descriptor instead.
+func (Cluster_LbSubsetConfig_LbSubsetMetadataFallbackPolicy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 1}
+}
+
+// Allows to override top level fallback policy per selector.
+type Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy int32
+
+const (
+ // If NOT_DEFINED top level config fallback policy is used instead.
+ Cluster_LbSubsetConfig_LbSubsetSelector_NOT_DEFINED Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 0
+ // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
+ Cluster_LbSubsetConfig_LbSubsetSelector_NO_FALLBACK Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 1
+ // If ANY_ENDPOINT is selected, any cluster endpoint may be returned
+ // (subject to policy, health checks, etc).
+ Cluster_LbSubsetConfig_LbSubsetSelector_ANY_ENDPOINT Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 2
+ // If DEFAULT_SUBSET is selected, load balancing is performed over the
+ // endpoints matching the values from the default_subset field.
+ Cluster_LbSubsetConfig_LbSubsetSelector_DEFAULT_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 3
+ // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata
+ // keys reduced to
+ // :ref:`fallback_keys_subset`.
+ // It allows for a fallback to a different, less specific selector if some of the keys of
+ // the selector are considered optional.
+ Cluster_LbSubsetConfig_LbSubsetSelector_KEYS_SUBSET Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy = 4
+)
+
+// Enum value maps for Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy.
+var (
+ Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_name = map[int32]string{
+ 0: "NOT_DEFINED",
+ 1: "NO_FALLBACK",
+ 2: "ANY_ENDPOINT",
+ 3: "DEFAULT_SUBSET",
+ 4: "KEYS_SUBSET",
+ }
+ Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy_value = map[string]int32{
+ "NOT_DEFINED": 0,
+ "NO_FALLBACK": 1,
+ "ANY_ENDPOINT": 2,
+ "DEFAULT_SUBSET": 3,
+ "KEYS_SUBSET": 4,
+ }
+)
+
+func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Enum() *Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy {
+ p := new(Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy)
+ *p = x
+ return p
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[6].Descriptor()
+}
+
+func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[6]
+}
+
+func (x Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy.Descriptor instead.
+func (Cluster_LbSubsetConfig_LbSubsetSelector_LbSubsetSelectorFallbackPolicy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 3, 0, 0}
+}
+
+// The hash function used to hash hosts onto the ketama ring.
+type Cluster_RingHashLbConfig_HashFunction int32
+
+const (
+ // Use `xxHash `_, this is the default hash function.
+ Cluster_RingHashLbConfig_XX_HASH Cluster_RingHashLbConfig_HashFunction = 0
+ // Use `MurmurHash2 `_, this is compatible with
+ // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
+ // on Linux and not macOS.
+ Cluster_RingHashLbConfig_MURMUR_HASH_2 Cluster_RingHashLbConfig_HashFunction = 1
+)
+
+// Enum value maps for Cluster_RingHashLbConfig_HashFunction.
+var (
+ Cluster_RingHashLbConfig_HashFunction_name = map[int32]string{
+ 0: "XX_HASH",
+ 1: "MURMUR_HASH_2",
+ }
+ Cluster_RingHashLbConfig_HashFunction_value = map[string]int32{
+ "XX_HASH": 0,
+ "MURMUR_HASH_2": 1,
+ }
+)
+
+func (x Cluster_RingHashLbConfig_HashFunction) Enum() *Cluster_RingHashLbConfig_HashFunction {
+ p := new(Cluster_RingHashLbConfig_HashFunction)
+ *p = x
+ return p
+}
+
+func (x Cluster_RingHashLbConfig_HashFunction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Cluster_RingHashLbConfig_HashFunction) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[7].Descriptor()
+}
+
+func (Cluster_RingHashLbConfig_HashFunction) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[7]
+}
+
+func (x Cluster_RingHashLbConfig_HashFunction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Cluster_RingHashLbConfig_HashFunction.Descriptor instead.
+func (Cluster_RingHashLbConfig_HashFunction) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{1, 7, 0}
+}
+
+type UpstreamConnectionOptions_FirstAddressFamilyVersion int32
+
+const (
+ // respect the native ranking of destination ip addresses returned from dns
+ // resolution
+ UpstreamConnectionOptions_DEFAULT UpstreamConnectionOptions_FirstAddressFamilyVersion = 0
+ UpstreamConnectionOptions_V4 UpstreamConnectionOptions_FirstAddressFamilyVersion = 1
+ UpstreamConnectionOptions_V6 UpstreamConnectionOptions_FirstAddressFamilyVersion = 2
+)
+
+// Enum value maps for UpstreamConnectionOptions_FirstAddressFamilyVersion.
+var (
+ UpstreamConnectionOptions_FirstAddressFamilyVersion_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "V4",
+ 2: "V6",
+ }
+ UpstreamConnectionOptions_FirstAddressFamilyVersion_value = map[string]int32{
+ "DEFAULT": 0,
+ "V4": 1,
+ "V6": 2,
+ }
+)
+
+func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Enum() *UpstreamConnectionOptions_FirstAddressFamilyVersion {
+ p := new(UpstreamConnectionOptions_FirstAddressFamilyVersion)
+ *p = x
+ return p
+}
+
+func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_cluster_v3_cluster_proto_enumTypes[8].Descriptor()
+}
+
+func (UpstreamConnectionOptions_FirstAddressFamilyVersion) Type() protoreflect.EnumType {
+ return &file_envoy_config_cluster_v3_cluster_proto_enumTypes[8]
+}
+
+func (x UpstreamConnectionOptions_FirstAddressFamilyVersion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UpstreamConnectionOptions_FirstAddressFamilyVersion.Descriptor instead.
+func (UpstreamConnectionOptions_FirstAddressFamilyVersion) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// Cluster list collections. Entries are “Cluster“ resources or references.
+// [#not-implemented-hide:]
+type ClusterCollection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries *v3.CollectionEntry `protobuf:"bytes,1,opt,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *ClusterCollection) Reset() {
+ *x = ClusterCollection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterCollection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterCollection) ProtoMessage() {}
+
+func (x *ClusterCollection) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_cluster_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterCollection.ProtoReflect.Descriptor instead.
+func (*ClusterCollection) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_cluster_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ClusterCollection) GetEntries() *v3.CollectionEntry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// Configuration for a single upstream cluster.
+// [#next-free-field: 59]
+type Cluster struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configuration to use different transport sockets for different endpoints. The entry of
+ // “envoy.transport_socket_match“ in the :ref:`LbEndpoint.Metadata
+ // ` is used to match against the
+ // transport sockets as they appear in the list. If a match is not found, the search continues in
+ // :ref:`LocalityLbEndpoints.Metadata
+ // `. The first :ref:`match
+ // ` is used. For example, with
+ // the following match
+ //
+ // .. code-block:: yaml
+ //
+ // transport_socket_matches:
+ // - name: "enableMTLS"
+ // match:
+ // acceptMTLS: true
+ // transport_socket:
+ // name: envoy.transport_sockets.tls
+ // config: { ... } # tls socket configuration
+ // - name: "defaultToPlaintext"
+ // match: {}
+ // transport_socket:
+ // name: envoy.transport_sockets.raw_buffer
+ //
+ // Connections to the endpoints whose metadata value under “envoy.transport_socket_match“
+ // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration.
+ //
+ // If a :ref:`socket match ` with empty match
+ // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext"
+ // socket match in case above.
+ //
+ // If an endpoint metadata's value under “envoy.transport_socket_match“ does not match any
+ // “TransportSocketMatch“, the locality metadata is then checked for a match. Barring any
+ // matches in the endpoint or locality metadata, the socket configuration fallbacks to use the
+ // “tls_context“ or “transport_socket“ specified in this cluster.
+ //
+ // This field allows gradual and flexible transport socket configuration changes.
+ //
+ // The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
+ // an endpoint's metadata can have two key value pairs as "acceptMTLS": "true",
+ // "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic
+ // has "acceptPlaintext": "true" metadata information.
+ //
+ // Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS
+ // traffic for endpoints with "acceptMTLS": "true", by adding a corresponding
+ // “TransportSocketMatch“ in this field. Other client Envoys receive CDS without
+ // “transport_socket_match“ set, and still send plain text traffic to the same cluster.
+ //
+ // This field can be used to specify custom transport socket configurations for health
+ // checks by adding matching key/value pairs in a health check's
+ // :ref:`transport socket match criteria ` field.
+ //
+ // [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
+ TransportSocketMatches []*Cluster_TransportSocketMatch `protobuf:"bytes,43,rep,name=transport_socket_matches,json=transportSocketMatches,proto3" json:"transport_socket_matches,omitempty"`
+ // Supplies the name of the cluster which must be unique across all clusters.
+ // The cluster name is used when emitting
+ // :ref:`statistics ` if :ref:`alt_stat_name
+ // ` is not provided.
+ // Any “:“ in the cluster name will be converted to “_“ when emitting statistics.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional alternative to the cluster name to be used for observability. This name is used
+ // emitting stats for the cluster and access logging the cluster name. This will appear as
+ // additional information in configuration dumps of a cluster's current status as
+ // :ref:`observability_name `
+ // and as an additional tag "upstream_cluster.name" while tracing. Note: Any “:“ in the name
+ // will be converted to “_“ when emitting statistics. This should not be confused with
+ // :ref:`Router Filter Header `.
+ AltStatName string `protobuf:"bytes,28,opt,name=alt_stat_name,json=altStatName,proto3" json:"alt_stat_name,omitempty"`
+ // Types that are assignable to ClusterDiscoveryType:
+ //
+ // *Cluster_Type
+ // *Cluster_ClusterType
+ ClusterDiscoveryType isCluster_ClusterDiscoveryType `protobuf_oneof:"cluster_discovery_type"`
+ // Configuration to use for EDS updates for the Cluster.
+ EdsClusterConfig *Cluster_EdsClusterConfig `protobuf:"bytes,3,opt,name=eds_cluster_config,json=edsClusterConfig,proto3" json:"eds_cluster_config,omitempty"`
+ // The timeout for new network connections to hosts in the cluster.
+ // If not set, a default value of 5s will be used.
+ ConnectTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=connect_timeout,json=connectTimeout,proto3" json:"connect_timeout,omitempty"`
+ // Soft limit on size of the cluster’s connections read and write buffers. If
+ // unspecified, an implementation defined default is applied (1MiB).
+ PerConnectionBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=per_connection_buffer_limit_bytes,json=perConnectionBufferLimitBytes,proto3" json:"per_connection_buffer_limit_bytes,omitempty"`
+ // The :ref:`load balancer type ` to use
+ // when picking a host in the cluster.
+ LbPolicy Cluster_LbPolicy `protobuf:"varint,6,opt,name=lb_policy,json=lbPolicy,proto3,enum=envoy.config.cluster.v3.Cluster_LbPolicy" json:"lb_policy,omitempty"`
+ // Setting this is required for specifying members of
+ // :ref:`STATIC`,
+ // :ref:`STRICT_DNS`
+ // or :ref:`LOGICAL_DNS` clusters.
+ // This field supersedes the “hosts“ field in the v2 API.
+ //
+ // .. attention::
+ //
+ // Setting this allows non-EDS cluster types to contain embedded EDS equivalent
+ // :ref:`endpoint assignments`.
+ LoadAssignment *v31.ClusterLoadAssignment `protobuf:"bytes,33,opt,name=load_assignment,json=loadAssignment,proto3" json:"load_assignment,omitempty"`
+ // Optional :ref:`active health checking `
+ // configuration for the cluster. If no
+ // configuration is specified no health checking will be done and all cluster
+ // members will be considered healthy at all times.
+ HealthChecks []*v32.HealthCheck `protobuf:"bytes,8,rep,name=health_checks,json=healthChecks,proto3" json:"health_checks,omitempty"`
+ // Optional maximum requests for a single upstream connection. This parameter
+ // is respected by both the HTTP/1.1 and HTTP/2 connection pool
+ // implementations. If not specified, there is no limit. Setting this
+ // parameter to 1 will effectively disable keep alive.
+ //
+ // .. attention::
+ //
+ // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"`
+ // Optional :ref:`circuit breaking ` for the cluster.
+ CircuitBreakers *CircuitBreakers `protobuf:"bytes,10,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"`
+ // HTTP protocol options that are applied only to upstream HTTP connections.
+ // These options apply to all HTTP versions.
+ // This has been deprecated in favor of
+ // :ref:`upstream_http_protocol_options `
+ // in the :ref:`http_protocol_options ` message.
+ // upstream_http_protocol_options can be set via the cluster's
+ // :ref:`extension_protocol_options`.
+ // See :ref:`upstream_http_protocol_options
+ // `
+ // for example usage.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ UpstreamHttpProtocolOptions *v32.UpstreamHttpProtocolOptions `protobuf:"bytes,46,opt,name=upstream_http_protocol_options,json=upstreamHttpProtocolOptions,proto3" json:"upstream_http_protocol_options,omitempty"`
+ // Additional options when handling HTTP requests upstream. These options will be applicable to
+ // both HTTP1 and HTTP2 requests.
+ // This has been deprecated in favor of
+ // :ref:`common_http_protocol_options `
+ // in the :ref:`http_protocol_options ` message.
+ // common_http_protocol_options can be set via the cluster's
+ // :ref:`extension_protocol_options`.
+ // See :ref:`upstream_http_protocol_options
+ // `
+ // for example usage.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ CommonHttpProtocolOptions *v32.HttpProtocolOptions `protobuf:"bytes,29,opt,name=common_http_protocol_options,json=commonHttpProtocolOptions,proto3" json:"common_http_protocol_options,omitempty"`
+ // Additional options when handling HTTP1 requests.
+ // This has been deprecated in favor of http_protocol_options fields in the
+ // :ref:`http_protocol_options ` message.
+ // http_protocol_options can be set via the cluster's
+ // :ref:`extension_protocol_options`.
+ // See :ref:`upstream_http_protocol_options
+ // `
+ // for example usage.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ HttpProtocolOptions *v32.Http1ProtocolOptions `protobuf:"bytes,13,opt,name=http_protocol_options,json=httpProtocolOptions,proto3" json:"http_protocol_options,omitempty"`
+ // Even if default HTTP2 protocol options are desired, this field must be
+ // set so that Envoy will assume that the upstream supports HTTP/2 when
+ // making new HTTP connection pool connections. Currently, Envoy only
+ // supports prior knowledge for upstream connections. Even if TLS is used
+ // with ALPN, “http2_protocol_options“ must be specified. As an aside this allows HTTP/2
+ // connections to happen over plain text.
+ // This has been deprecated in favor of http2_protocol_options fields in the
+ // :ref:`http_protocol_options `
+ // message. http2_protocol_options can be set via the cluster's
+ // :ref:`extension_protocol_options`.
+ // See :ref:`upstream_http_protocol_options
+ // `
+ // for example usage.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ Http2ProtocolOptions *v32.Http2ProtocolOptions `protobuf:"bytes,14,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"`
+ // The extension_protocol_options field is used to provide extension-specific protocol options
+ // for upstream connections. The key should match the extension filter name, such as
+ // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
+ // specific options.
+ // [#next-major-version: make this a list of typed extensions.]
+ // [#extension-category: envoy.upstream_options]
+ TypedExtensionProtocolOptions map[string]*anypb.Any `protobuf:"bytes,36,rep,name=typed_extension_protocol_options,json=typedExtensionProtocolOptions,proto3" json:"typed_extension_protocol_options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // If the DNS refresh rate is specified and the cluster type is either
+ // :ref:`STRICT_DNS`,
+ // or :ref:`LOGICAL_DNS`,
+ // this value is used as the cluster’s DNS refresh
+ // rate. The value configured must be at least 1ms. If this setting is not specified, the
+ // value defaults to 5000ms. For cluster types other than
+ // :ref:`STRICT_DNS`
+ // and :ref:`LOGICAL_DNS`
+ // this setting is ignored.
+ // This field is deprecated in favor of using the :ref:`cluster_type`
+ // extension point and configuring it with :ref:`DnsCluster`.
+ // If :ref:`cluster_type` is configured with
+ // :ref:`DnsCluster`, this field will be ignored.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/cluster/v3/cluster.proto.
+ DnsRefreshRate *durationpb.Duration `protobuf:"bytes,16,opt,name=dns_refresh_rate,json=dnsRefreshRate,proto3" json:"dns_refresh_rate,omitempty"`
+ // DNS jitter can be optionally specified if the cluster type is either
+ // :ref:`STRICT_DNS`,
+ // or :ref:`LOGICAL_DNS`.
+ // DNS jitter causes the cluster to refresh DNS entries later by a random amount of time to avoid a
+ // stampede of DNS requests. This value sets the upper bound (exclusive) for the random amount.
+ // There will be no jitter if this value is omitted. For cluster types other than
+ // :ref:`STRICT_DNS`
+ // and :ref:`LOGICAL_DNS`
+ // this setting is ignored.
+ // This field is deprecated in favor of using the :ref:`cluster_type